diff --git a/.evergreen/build-manylinux-internal.sh b/.evergreen/build-manylinux-internal.sh index 9943a983c4..267e647ffd 100755 --- a/.evergreen/build-manylinux-internal.sh +++ b/.evergreen/build-manylinux-internal.sh @@ -11,7 +11,7 @@ mv dist/* validdist || true # Compile wheels for PYTHON in /opt/python/*/bin/python; do - if [[ ! $PYTHON =~ (cp37|cp38|cp39|cp310|cp311|cp312) ]]; then + if [[ ! $PYTHON =~ (cp38|cp39|cp310|cp311|cp312) ]]; then continue fi # https://github.com/pypa/manylinux/issues/49 diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh index 11cf1dd231..19f2b7f4aa 100755 --- a/.evergreen/build-manylinux.sh +++ b/.evergreen/build-manylinux.sh @@ -39,7 +39,6 @@ ls dist # Check for any unexpected files. unexpected=$(find dist \! \( -iname dist -or \ - -iname '*cp37*' -or \ -iname '*cp38*' -or \ -iname '*cp39*' -or \ -iname '*cp310*' -or \ diff --git a/.evergreen/build-windows.sh b/.evergreen/build-windows.sh index a3ed0c2f19..d30382fcee 100755 --- a/.evergreen/build-windows.sh +++ b/.evergreen/build-windows.sh @@ -8,7 +8,7 @@ rm -rf validdist mkdir -p validdist mv dist/* validdist || true -for VERSION in 37 38 39 310 311 312; do +for VERSION in 38 39 310 311 312; do _pythons=("C:/Python/Python${VERSION}/python.exe" \ "C:/Python/32/Python${VERSION}/python.exe") for PYTHON in "${_pythons[@]}"; do diff --git a/.evergreen/check-c-extensions.sh b/.evergreen/check-c-extensions.sh deleted file mode 100755 index cb51ceed4a..0000000000 --- a/.evergreen/check-c-extensions.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -set -o errexit # Exit the script with error if any of the commands fail - -# Supported/used environment variables: -# C_EXTENSIONS Pass --no_ext to skip installing the C extensions. 
- -PYTHON_IMPL=$(python -c "import platform; print(platform.python_implementation())") -if [ -z "$C_EXTENSIONS" ] && [ "$PYTHON_IMPL" = "CPython" ]; then - PYMONGO_C_EXT_MUST_BUILD=1 python setup.py build_ext -i - python tools/fail_if_no_c.py -fi diff --git a/.evergreen/combine-coverage.sh b/.evergreen/combine-coverage.sh index f4aa3c29af..7db4a6cbc2 100644 --- a/.evergreen/combine-coverage.sh +++ b/.evergreen/combine-coverage.sh @@ -13,9 +13,9 @@ if [ -z "$PYTHON_BINARY" ]; then fi createvirtualenv "$PYTHON_BINARY" covenv -# coverage 7.3 dropped support for Python 3.7, keep in sync with run-tests.sh +# Keep in sync with run-tests.sh # coverage >=5 is needed for relative_files=true. -pip install -q "coverage>=5,<7.3" +pip install -q "coverage>=5,<=7.5" pip list ls -la coverage/ diff --git a/.evergreen/config.yml b/.evergreen/config.yml index f66b055362..12cce5bf77 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -12,7 +12,7 @@ stepback: true # Actual testing tasks are marked with `type: test` command_type: system -# Protect ourself against rogue test case, or curl gone wild, that runs forever +# Protect ourselves against rogue test case, or curl gone wild, that runs forever # Good rule of thumb: the averageish length a task takes, times 5 # That roughly accounts for variable system performance for various buildvariants exec_timeout_secs: 3600 # 60 minutes is the longest we'll ever run (primarily @@ -27,7 +27,7 @@ timeout: functions: "fetch source": - # Executes git clone and applies the submitted patch, if any + # Executes clone and applies the submitted patch, if any - command: git.get_project params: directory: "src" @@ -58,14 +58,12 @@ functions: export MONGO_ORCHESTRATION_HOME="$DRIVERS_TOOLS/.evergreen/orchestration" export MONGODB_BINARIES="$DRIVERS_TOOLS/mongodb/bin" - export UPLOAD_BUCKET="${project}" cat < expansion.yml CURRENT_VERSION: "$CURRENT_VERSION" DRIVERS_TOOLS: "$DRIVERS_TOOLS" MONGO_ORCHESTRATION_HOME: "$MONGO_ORCHESTRATION_HOME" 
MONGODB_BINARIES: "$MONGODB_BINARIES" - UPLOAD_BUCKET: "$UPLOAD_BUCKET" PROJECT_DIRECTORY: "$PROJECT_DIRECTORY" PREPARE_SHELL: | set -o errexit @@ -73,7 +71,6 @@ functions: export DRIVERS_TOOLS="$DRIVERS_TOOLS" export MONGO_ORCHESTRATION_HOME="$MONGO_ORCHESTRATION_HOME" export MONGODB_BINARIES="$MONGODB_BINARIES" - export UPLOAD_BUCKET="$UPLOAD_BUCKET" export PROJECT_DIRECTORY="$PROJECT_DIRECTORY" export TMPDIR="$MONGO_ORCHESTRATION_HOME/db" @@ -103,30 +100,35 @@ functions: echo "{ \"releases\": { \"default\": \"$MONGODB_BINARIES\" }}" > $MONGO_ORCHESTRATION_HOME/orchestration.config "upload coverage" : + - command: ec2.assume_role + params: + role_arn: ${assume_role_arn} - command: s3.put params: - aws_key: ${aws_key} - aws_secret: ${aws_secret} + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} local_file: src/.coverage optional: true # Upload the coverage report for all tasks in a single build to the same directory. - remote_file: ${UPLOAD_BUCKET}/coverage/${revision}/${version_id}/coverage/coverage.${build_variant}.${task_name} - bucket: mciuploads + remote_file: coverage/${revision}/${version_id}/coverage/coverage.${build_variant}.${task_name} + bucket: ${bucket_name} permissions: public-read content_type: text/html display_name: "Raw Coverage Report" "download and merge coverage" : + - command: ec2.assume_role + params: + role_arn: ${assume_role_arn} - command: shell.exec params: - silent: true working_dir: "src" + silent: true + include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] script: | - export AWS_ACCESS_KEY_ID=${aws_key} - export AWS_SECRET_ACCESS_KEY=${aws_secret} - # Download all the task coverage files. 
- aws s3 cp --recursive s3://mciuploads/${UPLOAD_BUCKET}/coverage/${revision}/${version_id}/coverage/ coverage/ + aws s3 cp --recursive s3://${bucket_name}/coverage/${revision}/${version_id}/coverage/ coverage/ - command: shell.exec params: working_dir: "src" @@ -136,20 +138,20 @@ functions: # Upload the resulting html coverage report. - command: shell.exec params: - silent: true working_dir: "src" + silent: true + include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] script: | - export AWS_ACCESS_KEY_ID=${aws_key} - export AWS_SECRET_ACCESS_KEY=${aws_secret} - aws s3 cp htmlcov/ s3://mciuploads/${UPLOAD_BUCKET}/coverage/${revision}/${version_id}/htmlcov/ --recursive --acl public-read --region us-east-1 + aws s3 cp htmlcov/ s3://${bucket_name}/coverage/${revision}/${version_id}/htmlcov/ --recursive --acl public-read --region us-east-1 # Attach the index.html with s3.put so it shows up in the Evergreen UI. - command: s3.put params: - aws_key: ${aws_key} - aws_secret: ${aws_secret} + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} local_file: src/htmlcov/index.html - remote_file: ${UPLOAD_BUCKET}/coverage/${revision}/${version_id}/htmlcov/index.html - bucket: mciuploads + remote_file: coverage/${revision}/${version_id}/htmlcov/index.html + bucket: ${bucket_name} permissions: public-read content_type: text/html display_name: "Coverage Report HTML" @@ -172,34 +174,40 @@ functions: include: - "./**.core" - "./**.mdmp" # Windows: minidumps + - command: ec2.assume_role + params: + role_arn: ${assume_role_arn} - command: s3.put params: - aws_key: ${aws_key} - aws_secret: ${aws_secret} + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} local_file: mongo-coredumps.tgz - remote_file: ${UPLOAD_BUCKET}/${build_variant}/${revision}/${version_id}/${build_id}/coredumps/${task_id}-${execution}-mongodb-coredumps.tar.gz 
- bucket: mciuploads + remote_file: ${build_variant}/${revision}/${version_id}/${build_id}/coredumps/${task_id}-${execution}-mongodb-coredumps.tar.gz + bucket: ${bucket_name} permissions: public-read content_type: ${content_type|application/gzip} display_name: Core Dumps - Execution optional: true - command: s3.put params: - aws_key: ${aws_key} - aws_secret: ${aws_secret} + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} local_file: mongodb-logs.tar.gz - remote_file: ${UPLOAD_BUCKET}/${build_variant}/${revision}/${version_id}/${build_id}/logs/${task_id}-${execution}-mongodb-logs.tar.gz - bucket: mciuploads + remote_file: ${build_variant}/${revision}/${version_id}/${build_id}/logs/${task_id}-${execution}-mongodb-logs.tar.gz + bucket: ${bucket_name} permissions: public-read content_type: ${content_type|application/x-gzip} display_name: "mongodb-logs.tar.gz" - command: s3.put params: - aws_key: ${aws_key} - aws_secret: ${aws_secret} + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} local_file: drivers-tools/.evergreen/orchestration/server.log - remote_file: ${UPLOAD_BUCKET}/${build_variant}/${revision}/${version_id}/${build_id}/logs/${task_id}-${execution}-orchestration.log - bucket: mciuploads + remote_file: ${build_variant}/${revision}/${version_id}/${build_id}/logs/${task_id}-${execution}-orchestration.log + bucket: ${bucket_name} permissions: public-read content_type: ${content_type|text/plain} display_name: "orchestration.log" @@ -211,13 +219,17 @@ functions: source_dir: ${PROJECT_DIRECTORY}/ include: - "./**" + - command: ec2.assume_role + params: + role_arn: ${assume_role_arn} - command: s3.put params: - aws_key: ${aws_key} - aws_secret: ${aws_secret} + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} local_file: working-dir.tar.gz - remote_file: 
${UPLOAD_BUCKET}/${build_variant}/${revision}/${version_id}/${build_id}/artifacts/${task_id}-${execution}-working-dir.tar.gz - bucket: mciuploads + remote_file: ${build_variant}/${revision}/${version_id}/${build_id}/artifacts/${task_id}-${execution}-working-dir.tar.gz + bucket: ${bucket_name} permissions: public-read content_type: ${content_type|application/x-gzip} display_name: "working-dir.tar.gz" @@ -232,11 +244,12 @@ functions: - "*.lock" - command: s3.put params: - aws_key: ${aws_key} - aws_secret: ${aws_secret} + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} local_file: drivers-dir.tar.gz - remote_file: ${UPLOAD_BUCKET}/${build_variant}/${revision}/${version_id}/${build_id}/artifacts/${task_id}-${execution}-drivers-dir.tar.gz - bucket: mciuploads + remote_file: ${build_variant}/${revision}/${version_id}/${build_id}/artifacts/${task_id}-${execution}-drivers-dir.tar.gz + bucket: ${bucket_name} permissions: public-read content_type: ${content_type|application/x-gzip} display_name: "drivers-dir.tar.gz" @@ -313,18 +326,15 @@ functions: params: script: | ${PREPARE_SHELL} - set -o xtrace - # The mongohouse build script needs to be passed the VARIANT variable, see - # https://github.com/10gen/mongohouse/blob/973cc11/evergreen.yaml#L65 - VARIANT=rhel84-small bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/build-mongohouse-local.sh + bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/pull-mongohouse-image.sh - command: shell.exec type: setup params: - background: true script: | ${PREPARE_SHELL} - set -o xtrace - bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/run-mongohouse-local.sh + bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/run-mongohouse-image.sh + sleep 1 + docker ps "stop mongo-orchestration": - command: shell.exec @@ -368,74 +378,22 @@ functions: PYTHON_BINARY=${PYTHON_BINARY} bash ${PROJECT_DIRECTORY}/.evergreen/tox.sh -m doc-test "run tests": - # If testing FLE, start the KMS mock servers, 
first create the virtualenv. - - command: shell.exec - params: - script: | - if [ -n "${test_encryption}" ]; then - ${PREPARE_SHELL} - cd ${DRIVERS_TOOLS}/.evergreen/csfle - . ./activate-kmstlsvenv.sh - fi - # Run in the background so the mock servers don't block the EVG task. - - command: shell.exec - params: - background: true - script: | - if [ -n "${test_encryption}" ]; then - ${PREPARE_SHELL} - cd ${DRIVERS_TOOLS}/.evergreen/csfle - . ./activate-kmstlsvenv.sh - # The -u options forces the stdout and stderr streams to be unbuffered. - # TMPDIR is required to avoid "AF_UNIX path too long" errors. - TMPDIR="$(dirname $DRIVERS_TOOLS)" python -u kms_kmip_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/server.pem --port 5698 & - python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 8000 & - python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 8001 & - python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/server.pem --port 8002 --require_client_cert & - fi - # Wait up to 10 seconds for the KMIP server to start. - command: shell.exec params: - script: | - if [ -n "${test_encryption}" ]; then - ${PREPARE_SHELL} - cd ${DRIVERS_TOOLS}/.evergreen/csfle - . ./activate-kmstlsvenv.sh - for i in $(seq 1 1 10); do - sleep 1 - if python -u kms_kmip_client.py; then - echo 'KMS KMIP server started!' - exit 0 - fi - done - echo 'Failed to start KMIP server!' 
- exit 1 - fi - - command: shell.exec - type: test - params: - silent: true working_dir: "src" + shell: bash + include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] script: | + ${PREPARE_SHELL} if [ -n "${test_encryption}" ]; then - cat < fle_creds.sh - export FLE_AWS_KEY="${fle_aws_key}" - export FLE_AWS_SECRET="${fle_aws_secret}" - export FLE_AZURE_CLIENTID="${fle_azure_clientid}" - export FLE_AZURE_TENANTID="${fle_azure_tenantid}" - export FLE_AZURE_CLIENTSECRET="${fle_azure_clientsecret}" - export FLE_GCP_EMAIL="${fle_gcp_email}" - export FLE_GCP_PRIVATEKEY="${fle_gcp_privatekey}" - # Needed for generating temporary aws credentials. - export AWS_ACCESS_KEY_ID="${fle_aws_key}" - export AWS_SECRET_ACCESS_KEY="${fle_aws_secret}" - export AWS_DEFAULT_REGION=us-east-1 - EOT + . .evergreen/tox.sh -m setup-encryption fi - command: shell.exec type: test params: working_dir: "src" + shell: bash + include_expansions_in_env: ["DRIVERS_ATLAS_LAMBDA_USER", "DRIVERS_ATLAS_LAMBDA_PASSWORD"] script: | # Disable xtrace set +x @@ -449,9 +407,6 @@ functions: if [ -n "${test_encryption}" ]; then # Disable xtrace (just in case it was accidentally set). set +x - . 
./fle_creds.sh - rm -f ./fle_creds.sh - export LIBMONGOCRYPT_URL="${libmongocrypt_url}" export TEST_ENCRYPTION=1 if [ -n "${test_encryption_pyopenssl}" ]; then export TEST_ENCRYPTION_PYOPENSSL=1 @@ -474,22 +429,17 @@ functions: fi if [ -n "${test_serverless}" ]; then export TEST_SERVERLESS=1 - export SERVERLESS_ATLAS_USER="${SERVERLESS_ATLAS_USER}" - export SERVERLESS_ATLAS_PASSWORD="${SERVERLESS_ATLAS_PASSWORD}" - export MONGODB_URI="${SERVERLESS_URI}" - export SINGLE_MONGOS_LB_URI="${MONGODB_URI}" - export MULTI_MONGOS_LB_URI="${MONGODB_URI}" + fi + if [ -n "${IS_SERVERLESS_PROXY}" ]; then + export IS_SERVERLESS_PROXY=1 fi if [ -n "${TEST_INDEX_MANAGEMENT}" ]; then export TEST_INDEX_MANAGEMENT=1 - export MONGODB_URI="${TEST_INDEX_URI}" - export DB_USER="${DRIVERS_ATLAS_LAMBDA_USER}" - export DB_PASSWORD="${DRIVERS_ATLAS_LAMBDA_PASSWORD}" fi GREEN_FRAMEWORK=${GREEN_FRAMEWORK} \ PYTHON_BINARY=${PYTHON_BINARY} \ - C_EXTENSIONS=${C_EXTENSIONS} \ + NO_EXT=${NO_EXT} \ COVERAGE=${COVERAGE} \ COMPRESSORS=${COMPRESSORS} \ AUTH=${AUTH} \ @@ -499,23 +449,15 @@ functions: bash ${PROJECT_DIRECTORY}/.evergreen/tox.sh -m test-eg "run enterprise auth tests": - - command: ec2.assume_role - params: - role_arn: ${aws_test_secrets_role} - command: shell.exec type: test params: working_dir: "src" + include_expansions_in_env: ["DRIVERS_TOOLS", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] script: | # Disable xtrace for security reasons (just in case it was accidentally set). 
set +x - - DRIVERS_TOOLS="${DRIVERS_TOOLS}" \ - AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID}" \ - AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY}" \ - AWS_SESSION_TOKEN="${AWS_SESSION_TOKEN}" \ - bash ${PROJECT_DIRECTORY}/.evergreen/tox.sh -m aws-secrets -- drivers/enterprise_auth - + bash ${DRIVERS_TOOLS}/.evergreen/auth_aws/setup_secrets.sh drivers/enterprise_auth PROJECT_DIRECTORY="${PROJECT_DIRECTORY}" \ PYTHON_BINARY="${PYTHON_BINARY}" \ TEST_ENTERPRISE_AUTH=1 \ @@ -523,43 +465,29 @@ functions: bash ${PROJECT_DIRECTORY}/.evergreen/tox.sh -m test-eg "run atlas tests": - - command: ec2.assume_role - params: - role_arn: ${aws_test_secrets_role} - command: shell.exec type: test params: - include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] + include_expansions_in_env: ["DRIVERS_TOOLS", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] working_dir: "src" script: | # Disable xtrace for security reasons (just in case it was accidentally set). 
set +x set -o errexit - - DRIVERS_TOOLS="${DRIVERS_TOOLS}" \ - AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID}" \ - AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY}" \ - AWS_SESSION_TOKEN="${AWS_SESSION_TOKEN}" \ - bash ${PROJECT_DIRECTORY}/.evergreen/tox.sh -m aws-secrets -- drivers/atlas_connect - + bash ${DRIVERS_TOOLS}/.evergreen/auth_aws/setup_secrets.sh drivers/atlas_connect PROJECT_DIRECTORY="${PROJECT_DIRECTORY}" \ PYTHON_BINARY="${PYTHON_BINARY}" \ TEST_ATLAS=1 \ bash ${PROJECT_DIRECTORY}/.evergreen/tox.sh -m test-eg "get aws auth secrets": - - command: ec2.assume_role - params: - role_arn: ${aws_test_secrets_role} - - command: shell.exec + - command: subprocess.exec type: test params: - add_expansions_to_env: true - working_dir: "src" - script: | - ${PREPARE_SHELL} - cd $DRIVERS_TOOLS/.evergreen/auth_aws - ./setup_secrets.sh drivers/aws_auth + include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/auth_aws/setup-secrets.sh "run aws auth test with regular aws credentials": - command: shell.exec @@ -612,19 +540,15 @@ functions: AWS_ROLE_SESSION_NAME="test" \ .evergreen/run-mongodb-aws-test.sh web-identity - "run oidc auth test with aws credentials": - - command: ec2.assume_role - params: - role_arn: ${aws_test_secrets_role} - - command: shell.exec + "run oidc auth test with test credentials": + - command: subprocess.exec type: test params: working_dir: "src" - shell: bash - include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] - script: | - ${PREPARE_SHELL} - bash .evergreen/run-mongodb-oidc-test.sh + binary: bash + include_expansions_in_env: ["DRIVERS_TOOLS", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] + args: + - .evergreen/run-mongodb-oidc-test.sh "run aws auth test with aws credentials as environment variables": - command: shell.exec @@ -658,13 +582,22 @@ functions: exit 0 fi ${PREPARE_SHELL} - 
.evergreen/run-mongodb-aws-test.sh session-creds + set -ex + cd ${DRIVERS_TOOLS}/.evergreen/auth_aws + . ./activate-authawsvenv.sh + . aws_setup.sh ecs + export MONGODB_BINARIES="${MONGODB_BINARIES}"; + export PROJECT_DIRECTORY="${PROJECT_DIRECTORY}"; + python aws_tester.py ecs + cd - "cleanup": - command: shell.exec params: + working_dir: "src" script: | ${PREPARE_SHELL} + . .evergreen/tox.sh -m teardown-encryption rm -rf $DRIVERS_TOOLS || true rm -f ./secrets-export.sh || true @@ -720,6 +653,25 @@ functions: # Don't use ${file} syntax here because evergreen treats it as an empty expansion. [ -f "$file" ] && bash $file || echo "$file not available, skipping" + "assume ec2 role": + - command: ec2.assume_role + params: + role_arn: ${aws_test_secrets_role} + + "setup atlas": + - command: subprocess.exec + params: + binary: bash + include_expansions_in_env: ["task_id", "execution"] + env: + MONGODB_VERSION: "7.0" + LAMBDA_STACK_NAME: dbx-python-lambda + args: + - ${DRIVERS_TOOLS}/.evergreen/atlas/setup-atlas-cluster.sh + - command: expansions.update + params: + file: atlas-expansion.yml + "run-ocsp-test": - command: shell.exec type: test @@ -828,6 +780,13 @@ functions: python ./lib/aws_assign_instance_profile.py fi + "teardown atlas": + - command: subprocess.exec + params: + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/atlas/teardown-atlas-cluster.sh + "build release": - command: shell.exec type: test @@ -845,27 +804,32 @@ functions: source_dir: "src/dist" include: - "*" + - command: ec2.assume_role + params: + role_arn: ${assume_role_arn} - command: s3.put params: - aws_key: ${aws_key} - aws_secret: ${aws_secret} + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} local_file: release-files.tgz - remote_file: ${UPLOAD_BUCKET}/release/${revision}/${task_id}-${execution}-release-files.tar.gz - bucket: mciuploads + remote_file: release/${revision}/${task_id}-${execution}-release-files.tar.gz + bucket: 
${bucket_name} permissions: public-read content_type: ${content_type|application/gzip} display_name: Release files "download and merge releases": + - command: ec2.assume_role + params: + role_arn: ${assume_role_arn} - command: shell.exec params: silent: true + include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] script: | - export AWS_ACCESS_KEY_ID=${aws_key} - export AWS_SECRET_ACCESS_KEY=${aws_secret} - # Download all the task coverage files. - aws s3 cp --recursive s3://mciuploads/${UPLOAD_BUCKET}/release/${revision}/ release/ + aws s3 cp --recursive s3://${bucket_name}/release/${revision}/ release/ - command: shell.exec params: shell: "bash" @@ -888,8 +852,8 @@ functions: done # Build source distribution. cd src/ - /opt/python/3.7/bin/python3 -m pip install build - /opt/python/3.7/bin/python3 -m build --sdist . + /opt/python/3.8/bin/python3 -m pip install build + /opt/python/3.8/bin/python3 -m build --sdist . cp dist/* ../releases - command: archive.targz_pack params: @@ -899,15 +863,35 @@ functions: - "*" - command: s3.put params: - aws_key: ${aws_key} - aws_secret: ${aws_secret} + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} local_file: release-files-all.tgz - remote_file: ${UPLOAD_BUCKET}/release-all/${revision}/${task_id}-${execution}-release-files-all.tar.gz - bucket: mciuploads + remote_file: release-all/${revision}/${task_id}-${execution}-release-files-all.tar.gz + bucket: ${bucket_name} permissions: public-read content_type: ${content_type|application/gzip} display_name: Release files all + "run perf tests": + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + PROJECT_DIRECTORY=${PROJECT_DIRECTORY} bash ${PROJECT_DIRECTORY}/.evergreen/run-perf-tests.sh + + "attach benchmark test results": + - command: attach.results + params: + file_location: src/report.json + + "send dashboard data": + - command: 
perf.send + params: + file: src/results.json + pre: - func: "fetch source" - func: "prepare resources" @@ -916,6 +900,7 @@ pre: - func: "init test-results" - func: "make files executable" - func: "install dependencies" + - func: "assume ec2 role" post: # Disabled, causing timeouts @@ -935,31 +920,19 @@ task_groups: setup_group: - func: "fetch source" - func: "prepare resources" - - command: shell.exec - params: - shell: "bash" - script: | - ${PREPARE_SHELL} - set +o xtrace - LOADBALANCED=ON \ - SERVERLESS_DRIVERS_GROUP=${SERVERLESS_DRIVERS_GROUP} \ - SERVERLESS_API_PUBLIC_KEY=${SERVERLESS_API_PUBLIC_KEY} \ - SERVERLESS_API_PRIVATE_KEY=${SERVERLESS_API_PRIVATE_KEY} \ - bash ${DRIVERS_TOOLS}/.evergreen/serverless/create-instance.sh - - command: expansions.update + - command: subprocess.exec params: - file: serverless-expansion.yml - teardown_group: - - command: shell.exec + binary: bash + env: + VAULT_NAME: ${VAULT_NAME} + args: + - ${DRIVERS_TOOLS}/.evergreen/serverless/create-instance.sh + teardown_task: + - command: subprocess.exec params: - script: | - ${PREPARE_SHELL} - set +o xtrace - SERVERLESS_DRIVERS_GROUP=${SERVERLESS_DRIVERS_GROUP} \ - SERVERLESS_API_PUBLIC_KEY=${SERVERLESS_API_PUBLIC_KEY} \ - SERVERLESS_API_PRIVATE_KEY=${SERVERLESS_API_PRIVATE_KEY} \ - SERVERLESS_INSTANCE_NAME=${SERVERLESS_INSTANCE_NAME} \ - bash ${DRIVERS_TOOLS}/.evergreen/serverless/delete-instance.sh + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/serverless/delete-instance.sh - func: "upload test results" tasks: - ".serverless" @@ -972,32 +945,17 @@ task_groups: - func: prepare resources - func: fix absolute paths - func: make files executable - - command: shell.exec - params: - shell: "bash" - script: | - ${PREPARE_SHELL} - echo '${testgcpkms_key_file}' > /tmp/testgcpkms_key_file.json - export GCPKMS_KEYFILE=/tmp/testgcpkms_key_file.json - export GCPKMS_DRIVERS_TOOLS=$DRIVERS_TOOLS - export GCPKMS_SERVICEACCOUNT="${testgcpkms_service_account}" - export 
GCPKMS_MACHINETYPE="e2-standard-4" - $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/create-and-setup-instance.sh - # Load the GCPKMS_GCLOUD, GCPKMS_INSTANCE, GCPKMS_REGION, and GCPKMS_ZONE expansions. - - command: expansions.update + - command: subprocess.exec params: - file: testgcpkms-expansions.yml - teardown_group: - - command: shell.exec + binary: "bash" + args: + - ${DRIVERS_TOOLS}/.evergreen/csfle/gcpkms/create-and-setup-instance.sh + teardown_task: + - command: subprocess.exec params: - shell: "bash" - script: | - ${PREPARE_SHELL} - export GCPKMS_GCLOUD=${GCPKMS_GCLOUD} - export GCPKMS_PROJECT=${GCPKMS_PROJECT} - export GCPKMS_ZONE=${GCPKMS_ZONE} - export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} - $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/delete-instance.sh + binary: "bash" + args: + - ${DRIVERS_TOOLS}/.evergreen/csfle/gcpkms/delete-instance.sh - func: "upload test results" tasks: - testgcpkms-task @@ -1008,70 +966,51 @@ task_groups: - func: prepare resources - func: fix absolute paths - func: make files executable - - command: shell.exec - params: - shell: bash - script: |- - ${PREPARE_SHELL} - # Get azurekms credentials from the vault. - bash $DRIVERS_TOOLS/.evergreen/auth_aws/setup_secrets.sh drivers/azurekms - source ./secrets-export.sh - export AZUREKMS_VMNAME_PREFIX="PYTHON_DRIVER" - export AZUREKMS_DRIVERS_TOOLS="$DRIVERS_TOOLS" - $DRIVERS_TOOLS/.evergreen/csfle/azurekms/create-and-setup-vm.sh - - command: expansions.update + - command: subprocess.exec params: - file: testazurekms-expansions.yml + binary: bash + env: + AZUREKMS_VMNAME_PREFIX: "PYTHON_DRIVER" + args: + - ${DRIVERS_TOOLS}/.evergreen/csfle/azurekms/create-and-setup-vm.sh teardown_group: - # Load expansions again. The setup task may have failed before running `expansions.update`. 
- - command: expansions.update - params: - file: testazurekms-expansions.yml - - command: shell.exec + - command: subprocess.exec params: - shell: bash - script: |- - ${PREPARE_SHELL} - set -x - export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} - export AZUREKMS_RESOURCEGROUP=${AZUREKMS_RESOURCEGROUP} - export AZUREKMS_SCOPE=${AZUREKMS_SCOPE} - $DRIVERS_TOOLS/.evergreen/csfle/azurekms/delete-vm.sh + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/csfle/azurekms/delete-vm.sh - func: "upload test results" setup_group_can_fail_task: true - teardown_group_can_fail_task: true + teardown_task_can_fail_task: true setup_group_timeout_secs: 1800 tasks: - testazurekms-task - - name: test_aws_lambda_task_group + - name: testazureoidc_task_group setup_group: - func: fetch source - func: prepare resources + - func: fix absolute paths + - func: make files executable - command: subprocess.exec params: - working_dir: src binary: bash - add_expansions_to_env: true + env: + AZUREOIDC_VMNAME_PREFIX: "PYTHON_DRIVER" args: - - ${DRIVERS_TOOLS}/.evergreen/atlas/setup-atlas-cluster.sh - - command: expansions.update - params: - file: src/atlas-expansion.yml - teardown_group: + - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/azure/create-and-setup-vm.sh + teardown_task: - command: subprocess.exec params: - working_dir: src binary: bash - add_expansions_to_env: true args: - - ${DRIVERS_TOOLS}/.evergreen/atlas/teardown-atlas-cluster.sh + - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/azure/delete-vm.sh setup_group_can_fail_task: true setup_group_timeout_secs: 1800 tasks: - - test-aws-lambda-deployed + - oidc-auth-test-azure - - name: test_atlas_task_group_search_indexes + - name: testgcpoidc_task_group setup_group: - func: fetch source - func: prepare resources @@ -1079,33 +1018,70 @@ task_groups: - func: make files executable - command: subprocess.exec params: - working_dir: src binary: bash - add_expansions_to_env: true env: - MONGODB_VERSION: "7.0" + GCPOIDC_VMNAME_PREFIX: "PYTHON_DRIVER" args: - - 
${DRIVERS_TOOLS}/.evergreen/atlas/setup-atlas-cluster.sh - - command: expansions.update - params: - file: src/atlas-expansion.yml - - command: shell.exec + - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/gcp/setup.sh + teardown_task: + - command: subprocess.exec params: - working_dir: src - shell: bash - script: |- - echo "TEST_INDEX_URI: ${MONGODB_URI}" > atlas-expansion.yml - - command: expansions.update + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/gcp/teardown.sh + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 + tasks: + - oidc-auth-test-gcp + + - name: testoidc_task_group + setup_group: + - func: fetch source + - func: prepare resources + - func: fix absolute paths + - func: make files executable + - func: "assume ec2 role" + - command: subprocess.exec params: - file: src/atlas-expansion.yml - teardown_group: + binary: bash + include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] + env: + # PYTHON-4447 + MONGODB_VERSION: "8.0" + args: + - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/setup.sh + teardown_task: - command: subprocess.exec params: - working_dir: src binary: bash - add_expansions_to_env: true args: - - ${DRIVERS_TOOLS}/.evergreen/atlas/teardown-atlas-cluster.sh + - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/teardown.sh + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 + tasks: + - oidc-auth-test + + - name: test_aws_lambda_task_group + setup_group: + - func: fetch source + - func: prepare resources + - func: setup atlas + teardown_task: + - func: teardown atlas + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 + tasks: + - test-aws-lambda-deployed + + - name: test_atlas_task_group_search_indexes + setup_group: + - func: fetch source + - func: prepare resources + - func: fix absolute paths + - func: make files executable + - func: setup atlas + teardown_task: + - func: teardown atlas setup_group_can_fail_task: true setup_group_timeout_secs: 1800 tasks: @@ 
-1141,7 +1117,7 @@ tasks: genhtml --version || true valgrind --version || true - - name: "release-mac-1100" + - name: "release-mac" tags: ["release_tag"] run_on: macos-1100 commands: @@ -1167,14 +1143,6 @@ tasks: VERSION: "3.8" - func: "upload release" - - name: "release-mac-1014" - tags: ["release_tag"] - run_on: macos-1014 - commands: - - func: "build release" - vars: - VERSION: "3.7" - - name: "release-windows" tags: ["release_tag"] run_on: windows-64-vsMulti-small @@ -1393,6 +1361,33 @@ tasks: TOPOLOGY: "sharded_cluster" - func: "run tests" + - name: "test-8.0-standalone" + tags: ["8.0", "standalone"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "8.0" + TOPOLOGY: "server" + - func: "run tests" + + - name: "test-8.0-replica_set" + tags: ["8.0", "replica_set"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "8.0" + TOPOLOGY: "replica_set" + - func: "run tests" + + - name: "test-8.0-sharded_cluster" + tags: ["8.0", "sharded_cluster"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "8.0" + TOPOLOGY: "sharded_cluster" + - func: "run tests" + - name: "test-7.0-standalone" tags: ["7.0", "standalone"] commands: @@ -1486,6 +1481,7 @@ tasks: vars: VERSION: "latest" TOPOLOGY: "server" + - func: "assume ec2 role" - func: "run enterprise auth tests" - name: "test-search-index-helpers" @@ -1546,6 +1542,7 @@ tasks: - name: "atlas-connect" tags: ["atlas-connect"] commands: + - func: "assume ec2 role" - func: "run atlas tests" - name: atlas-data-lake-tests @@ -1904,6 +1901,7 @@ tasks: ORCHESTRATION_FILE: "auth-aws.json" TOPOLOGY: "server" VERSION: "4.4" + - func: "assume ec2 role" - func: "get aws auth secrets" - func: "run aws auth test with regular aws credentials" - func: "run aws auth test with assume role credentials" @@ -1921,6 +1919,7 @@ tasks: ORCHESTRATION_FILE: "auth-aws.json" TOPOLOGY: "server" VERSION: "5.0" + - func: "assume ec2 role" - func: "get aws auth secrets" - func: "run aws auth 
test with regular aws credentials" - func: "run aws auth test with assume role credentials" @@ -1938,6 +1937,7 @@ tasks: ORCHESTRATION_FILE: "auth-aws.json" TOPOLOGY: "server" VERSION: "6.0" + - func: "assume ec2 role" - func: "get aws auth secrets" - func: "run aws auth test with regular aws credentials" - func: "run aws auth test with assume role credentials" @@ -1955,6 +1955,25 @@ tasks: ORCHESTRATION_FILE: "auth-aws.json" TOPOLOGY: "server" VERSION: "7.0" + - func: "assume ec2 role" + - func: "get aws auth secrets" + - func: "run aws auth test with regular aws credentials" + - func: "run aws auth test with assume role credentials" + - func: "run aws auth test with aws credentials as environment variables" + - func: "run aws auth test with aws credentials and session token as environment variables" + - func: "run aws auth test with aws EC2 credentials" + - func: "run aws auth test with aws web identity credentials" + - func: "run aws ECS auth test" + + - name: "aws-auth-test-8.0" + commands: + - func: "bootstrap mongo-orchestration" + vars: + AUTH: "auth" + ORCHESTRATION_FILE: "auth-aws.json" + TOPOLOGY: "server" + VERSION: "8.0" + - func: "assume ec2 role" - func: "get aws auth secrets" - func: "run aws auth test with regular aws credentials" - func: "run aws auth test with assume role credentials" @@ -1972,6 +1991,7 @@ tasks: ORCHESTRATION_FILE: "auth-aws.json" TOPOLOGY: "server" VERSION: "rapid" + - func: "assume ec2 role" - func: "get aws auth secrets" - func: "run aws auth test with regular aws credentials" - func: "run aws auth test with assume role credentials" @@ -1989,6 +2009,7 @@ tasks: ORCHESTRATION_FILE: "auth-aws.json" TOPOLOGY: "server" VERSION: "latest" + - func: "assume ec2 role" - func: "get aws auth secrets" - func: "run aws auth test with regular aws credentials" - func: "run aws auth test with assume role credentials" @@ -1998,10 +2019,6 @@ tasks: - func: "run aws auth test with aws web identity credentials" - func: "run aws ECS auth test" - 
- name: "oidc-auth-test-latest" - commands: - - func: "run oidc auth test with aws credentials" - - name: load-balancer-test commands: - func: "bootstrap mongo-orchestration" @@ -2011,6 +2028,47 @@ tasks: - func: "run load-balancer" - func: "run tests" + - name: "oidc-auth-test" + commands: + - func: "run oidc auth test with test credentials" + + - name: "oidc-auth-test-azure" + commands: + - command: shell.exec + type: test + params: + shell: bash + script: |- + set -o errexit + ${PREPARE_SHELL} + cd src + git add . + git commit -m "add files" + export AZUREOIDC_DRIVERS_TAR_FILE=/tmp/mongo-python-driver.tgz + git archive -o $AZUREOIDC_DRIVERS_TAR_FILE HEAD + export AZUREOIDC_TEST_CMD="OIDC_ENV=azure ./.evergreen/run-mongodb-oidc-test.sh" + bash $DRIVERS_TOOLS/.evergreen/auth_oidc/azure/run-driver-test.sh + + - name: "oidc-auth-test-gcp" + commands: + - command: shell.exec + type: test + params: + shell: bash + script: |- + set -o errexit + ${PREPARE_SHELL} + cd src + git add . + git commit -m "add files" + export GCPOIDC_DRIVERS_TAR_FILE=/tmp/mongo-python-driver.tgz + git archive -o $GCPOIDC_DRIVERS_TAR_FILE HEAD + # Define the command to run on the VM. + # Ensure that we source the environment file created for us, set up any other variables we need, + # and then run our test suite on the vm. + export GCPOIDC_TEST_CMD="OIDC_ENV=gcp ./.evergreen/run-mongodb-oidc-test.sh" + bash $DRIVERS_TOOLS/.evergreen/auth_oidc/gcp/run-driver-test.sh + - name: "test-fips-standalone" tags: ["fips"] commands: @@ -2044,36 +2102,14 @@ tasks: - name: "testgcpkms-task" commands: - - command: shell.exec + - command: subprocess.exec type: setup params: working_dir: "src" - shell: "bash" - script: | - ${PREPARE_SHELL} - echo "Copying files ... begin" - export GCPKMS_GCLOUD=${GCPKMS_GCLOUD} - export GCPKMS_PROJECT=${GCPKMS_PROJECT} - export GCPKMS_ZONE=${GCPKMS_ZONE} - export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} - tar czf /tmp/mongo-python-driver.tgz . 
- GCPKMS_SRC=/tmp/mongo-python-driver.tgz GCPKMS_DST=$GCPKMS_INSTANCENAME: $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/copy-file.sh - echo "Copying files ... end" - echo "Untarring file ... begin" - GCPKMS_CMD="tar xf mongo-python-driver.tgz" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh - echo "Untarring file ... end" - - command: shell.exec - type: test - params: - working_dir: "src" - shell: "bash" - script: | - ${PREPARE_SHELL} - export GCPKMS_GCLOUD=${GCPKMS_GCLOUD} - export GCPKMS_PROJECT=${GCPKMS_PROJECT} - export GCPKMS_ZONE=${GCPKMS_ZONE} - export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} - GCPKMS_CMD="SUCCESS=true TEST_FLE_GCP_AUTO=1 LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz ./.evergreen/tox.sh -m test-eg" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh + binary: "bash" + include_expansions_in_env: ["DRIVERS_TOOLS"] + args: + - .evergreen/run-gcpkms-test.sh - name: "testgcpkms-fail-task" # testgcpkms-fail-task runs in a non-GCE environment. @@ -2096,40 +2132,13 @@ tasks: - name: testazurekms-task commands: - - command: shell.exec - params: - shell: bash - script: |- - set -o errexit - ${PREPARE_SHELL} - source ./secrets-export.sh - cd src - echo "Copying files ... begin" - export AZUREKMS_RESOURCEGROUP=${AZUREKMS_RESOURCEGROUP} - export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} - export AZUREKMS_PRIVATEKEYPATH=/tmp/testazurekms_privatekey - tar czf /tmp/mongo-python-driver.tgz . - AZUREKMS_SRC="/tmp/mongo-python-driver.tgz" \ - AZUREKMS_DST="~/" \ - $DRIVERS_TOOLS/.evergreen/csfle/azurekms/copy-file.sh - echo "Copying files ... end" - echo "Untarring file ... begin" - AZUREKMS_CMD="tar xf mongo-python-driver.tgz" \ - $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh - echo "Untarring file ... 
end" - - command: shell.exec - type: test + - command: subprocess.exec params: - shell: bash - script: |- - set -o errexit - ${PREPARE_SHELL} - source ./secrets-export.sh - export AZUREKMS_RESOURCEGROUP=${AZUREKMS_RESOURCEGROUP} - export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} - export AZUREKMS_PRIVATEKEYPATH=/tmp/testazurekms_privatekey - AZUREKMS_CMD="KEY_NAME=\"$AZUREKMS_KEYNAME\" KEY_VAULT_ENDPOINT=\"$AZUREKMS_KEYVAULTENDPOINT\" LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz SUCCESS=true TEST_FLE_AZURE_AUTO=1 ./.evergreen/tox.sh -m test-eg" \ - $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh + binary: bash + working_dir: src + include_expansions_in_env: ["DRIVERS_TOOLS"] + args: + - .evergreen/run-azurekms-test.sh - name: testazurekms-fail-task commands: @@ -2139,37 +2148,75 @@ tasks: vars: VERSION: "latest" TOPOLOGY: "server" - - command: shell.exec + - command: subprocess.exec type: test params: - shell: bash - script: |- - set -o errexit - ${PREPARE_SHELL} - # Get azurekms credentials from the vault. 
- bash $DRIVERS_TOOLS/.evergreen/auth_aws/setup_secrets.sh drivers/azurekms - source ./secrets-export.sh - cd src - PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 \ - KEY_NAME="${AZUREKMS_KEYNAME}" \ - KEY_VAULT_ENDPOINT="${AZUREKMS_KEYVAULTENDPOINT}" \ - LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz \ - SUCCESS=false TEST_FLE_AZURE_AUTO=1 \ - ./.evergreen/tox.sh -m test-eg + binary: bash + working_dir: src + include_expansions_in_env: ["DRIVERS_TOOLS"] + args: + - .evergreen/run-azurekms-fail-test.sh + + - name: "perf-6.0-standalone" + tags: ["perf"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "v6.0-perf" + TOPOLOGY: "server" + - func: "run perf tests" + - func: "attach benchmark test results" + - func: "send dashboard data" + + - name: "perf-6.0-standalone-ssl" + tags: ["perf"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "v6.0-perf" + TOPOLOGY: "server" + SSL: "ssl" + - func: "run perf tests" + - func: "attach benchmark test results" + - func: "send dashboard data" + + - name: "assign-pr-reviewer" + tags: ["pr"] + allowed_requesters: ["patch", "github_pr"] + commands: + - command: shell.exec + type: test + params: + shell: "bash" + working_dir: src + script: | + ${PREPARE_SHELL} + set -x + export CONFIG=$PROJECT_DIRECTORY/.github/reviewers.txt + export SCRIPT="$DRIVERS_TOOLS/.evergreen/github_app/assign-reviewer.sh" + bash $SCRIPT -p $CONFIG -h ${github_commit} -o "mongodb" -n "mongo-python-driver" + echo '{"results": [{ "status": "PASS", "test_file": "Build", "log_raw": "Test completed" } ]}' > ${PROJECT_DIRECTORY}/test-results.json + + - name: "check-import-time" + tags: ["pr"] + commands: + - command: shell.exec + type: test + params: + shell: "bash" + working_dir: src + script: | + ${PREPARE_SHELL} + set -x + export BASE_SHA=${revision} + export HEAD_SHA=${github_commit} + bash .evergreen/run-import-time-test.sh axes: # Choice of 
distro - id: platform display_name: OS values: - - id: macos-1014 - display_name: "macOS 10.14" - run_on: macos-1014 - variables: - skip_EC2_auth_test: true - skip_ECS_auth_test: true - skip_web_identity_auth_test: true - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - id: macos-1100 display_name: "macOS 11.00" run_on: macos-1100 @@ -2177,7 +2224,6 @@ axes: skip_EC2_auth_test: true skip_ECS_auth_test: true skip_web_identity_auth_test: true - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - id: macos-1100-arm64 display_name: "macOS 11.00 Arm64" run_on: macos-1100-arm64 @@ -2185,25 +2231,18 @@ axes: skip_EC2_auth_test: true skip_ECS_auth_test: true skip_web_identity_auth_test: true - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - id: rhel7 display_name: "RHEL 7.x" run_on: rhel79-small batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-70-64-bit/master/latest/libmongocrypt.tar.gz - id: rhel8 display_name: "RHEL 8.x" run_on: rhel87-small batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-80-64-bit/master/latest/libmongocrypt.tar.gz - - id: rhel80-fips - display_name: "RHEL 8.0 FIPS" - run_on: rhel80-fips + - id: rhel92-fips + display_name: "RHEL 9.2 FIPS" + run_on: rhel92-fips batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-80-64-bit/master/latest/libmongocrypt.tar.gz - id: ubuntu-22.04 display_name: "Ubuntu 22.04" run_on: ubuntu2204-small @@ -2225,7 +2264,6 @@ axes: run_on: rhel82-arm64-small batchtime: 10080 # 7 days variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-82-arm64/master/latest/libmongocrypt.tar.gz - id: windows-64-vsMulti-small 
display_name: "Windows 64" run_on: windows-64-vsMulti-small @@ -2235,7 +2273,6 @@ axes: skip_EC2_auth_test: true skip_web_identity_auth_test: true venv_bin_dir: "Scripts" - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/windows-test/master/latest/libmongocrypt.tar.gz # Test with authentication? - id: auth @@ -2327,6 +2364,10 @@ axes: display_name: "MongoDB 7.0" variables: VERSION: "7.0" + - id: "8.0" + display_name: "MongoDB 8.0" + variables: + VERSION: "8.0" - id: "latest" display_name: "MongoDB latest" variables: @@ -2342,10 +2383,6 @@ axes: values: # Note: always display platform with python-version to avoid ambiguous display names. # Linux - - id: "3.7" - display_name: "Python 3.7" - variables: - PYTHON_BINARY: "/opt/python/3.7/bin/python3" - id: "3.8" display_name: "Python 3.8" variables: @@ -2366,10 +2403,10 @@ axes: display_name: "Python 3.12" variables: PYTHON_BINARY: "/opt/python/3.12/bin/python3" - - id: "pypy3.8" - display_name: "PyPy 3.8" + - id: "pypy3.9" + display_name: "PyPy 3.9" variables: - PYTHON_BINARY: "/opt/python/pypy3.8/bin/pypy3" + PYTHON_BINARY: "/opt/python/pypy3.9/bin/pypy3" - id: "pypy3.10" display_name: "PyPy 3.10" variables: @@ -2378,10 +2415,6 @@ axes: - id: python-version-windows display_name: "Python" values: - - id: "3.7" - display_name: "Python 3.7" - variables: - PYTHON_BINARY: "C:/python/Python37/python.exe" - id: "3.8" display_name: "Python 3.8" variables: @@ -2406,10 +2439,6 @@ axes: - id: python-version-windows-32 display_name: "Python" values: - - id: "3.7" - display_name: "32-bit Python 3.7" - variables: - PYTHON_BINARY: "C:/python/32/Python37/python.exe" - id: "3.8" display_name: "32-bit Python 3.8" variables: @@ -2460,11 +2489,11 @@ axes: - id: "without-c-extensions" display_name: "Without C Extensions" variables: - C_EXTENSIONS: "--no_ext" + NO_EXT: "1" - id: "with-c-extensions" display_name: "With C Extensions" variables: - C_EXTENSIONS: "" + NO_EXT: "" # Choice of MongoDB storage engine - id: 
storage-engine @@ -2569,17 +2598,24 @@ axes: - id: serverless display_name: "Serverless" values: - - id: "enabled" + - id: "original" display_name: "Serverless" variables: test_serverless: true batchtime: 10080 # 7 days + - id: "proxy" + display_name: "Serverless Proxy" + variables: + test_serverless: true + VAULT_NAME: "serverless_next" + IS_SERVERLESS_PROXY: true + batchtime: 10080 # 7 days buildvariants: - matrix_name: "tests-fips" matrix_spec: platform: - - rhel80-fips + - rhel92-fips auth: "auth" ssl: "ssl" display_name: "${platform} ${auth} ${ssl}" @@ -2591,17 +2627,18 @@ buildvariants: platform: # MacOS introduced SSL support with MongoDB >= 3.2. # Older server versions (2.6, 3.0) are supported without SSL. - - macos-1014 + - macos-1100 auth: "*" ssl: "*" exclude_spec: # No point testing with SSL without auth. - - platform: macos-1014 + - platform: macos-1100 auth: "noauth" ssl: "ssl" display_name: "${platform} ${auth} ${ssl}" tasks: - ".latest" + - ".8.0" - ".7.0" - ".6.0" - ".5.0" @@ -2618,6 +2655,7 @@ buildvariants: display_name: "${platform} ${auth-ssl}" tasks: - ".latest" + - ".8.0" - ".7.0" - ".6.0" - ".5.0" @@ -2642,6 +2680,7 @@ buildvariants: add_tasks: &encryption-server-versions - ".rapid" - ".latest" + - ".8.0" - ".7.0" - ".6.0" - ".5.0" @@ -2671,6 +2710,7 @@ buildvariants: tasks: &all-server-versions - ".rapid" - ".latest" + - ".8.0" - ".7.0" - ".6.0" - ".5.0" @@ -2686,10 +2726,10 @@ buildvariants: auth: "*" ssl: "ssl" pyopenssl: "*" - # Only test "noauth" with Python 3.7. + # Only test "noauth" with Python 3.8. 
exclude_spec: platform: rhel8 - python-version: ["3.8", "3.9", "3.10", "pypy3.8", "pypy3.10"] + python-version: ["3.9", "3.10", "pypy3.9", "pypy3.10"] auth: "noauth" ssl: "ssl" pyopenssl: "*" @@ -2701,7 +2741,7 @@ buildvariants: - matrix_name: "tests-pyopenssl-macOS" matrix_spec: - platform: macos-1014 + platform: macos-1100 auth: "auth" ssl: "ssl" pyopenssl: "*" @@ -2750,7 +2790,7 @@ buildvariants: exclude_spec: # These interpreters are always tested without extensions. - platform: rhel8 - python-version: ["pypy3.8", "pypy3.10"] + python-version: ["pypy3.9", "pypy3.10"] c-extensions: "*" auth-ssl: "*" coverage: "*" @@ -2766,7 +2806,7 @@ buildvariants: exclude_spec: # These interpreters are always tested without extensions. - platform: rhel8 - python-version: ["pypy3.8", "pypy3.10"] + python-version: ["pypy3.9", "pypy3.10"] c-extensions: "with-c-extensions" compression: "*" display_name: "${compression} ${c-extensions} ${python-version} ${platform}" @@ -2795,7 +2835,7 @@ buildvariants: exclude_spec: # Don't test green frameworks on these Python versions. - platform: rhel8 - python-version: ["pypy3.8", "pypy3.10"] + python-version: ["pypy3.9", "pypy3.10"] green-framework: "*" auth-ssl: "*" display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" @@ -2821,7 +2861,7 @@ buildvariants: matrix_spec: platform: rhel7 # Python 3.10+ requires OpenSSL 1.1.1+ - python-version: ["3.7", "3.8", "3.9", "pypy3.8", "pypy3.10"] + python-version: ["3.8", "3.9", "pypy3.9", "pypy3.10"] auth-ssl: "*" display_name: "OpenSSL 1.0.2 ${python-version} ${platform} ${auth-ssl}" tasks: @@ -2844,12 +2884,12 @@ buildvariants: then: add_tasks: *encryption-server-versions -# Storage engine tests on RHEL 8.4 (x86_64) with Python 3.7. +# Storage engine tests on RHEL 8.4 (x86_64) with Python 3.8. 
- matrix_name: "tests-storage-engines" matrix_spec: platform: rhel8 storage-engine: "*" - python-version: 3.7 + python-version: 3.8 display_name: "Storage ${storage-engine} ${python-version} ${platform}" rules: - if: @@ -2859,6 +2899,7 @@ buildvariants: then: add_tasks: - "test-latest-standalone" + - "test-8.0-standalone" - "test-7.0-standalone" - "test-6.0-standalone" - "test-5.0-standalone" @@ -2878,12 +2919,12 @@ buildvariants: - "test-3.6-standalone" - "test-3.6-replica_set" -# enableTestCommands=0 tests on RHEL 8.4 (x86_64) with Python 3.7. +# enableTestCommands=0 tests on RHEL 8.4 (x86_64) with Python 3.8. - matrix_name: "test-disableTestCommands" matrix_spec: platform: rhel8 disableTestCommands: "*" - python-version: "3.7" + python-version: "3.8" display_name: "Disable test commands ${python-version} ${platform}" tasks: - ".latest" @@ -2917,7 +2958,7 @@ buildvariants: - matrix_name: "tests-mod-wsgi" matrix_spec: platform: ubuntu-22.04 - python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] + python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] mod-wsgi-version: "*" display_name: "${mod-wsgi-version} ${python-version} ${platform}" tasks: @@ -2929,7 +2970,7 @@ buildvariants: - matrix_name: "mockupdb-tests" matrix_spec: platform: rhel8 - python-version: 3.7 + python-version: 3.8 display_name: "MockupDB Tests" tasks: - name: "mockupdb" @@ -2969,15 +3010,25 @@ buildvariants: platform: rhel8 python-version: "*" auth-ssl: auth-ssl - serverless: "*" - display_name: "Serverless ${python-version} ${platform}" + serverless: "original" + display_name: "${serverless} ${python-version} ${platform}" tasks: - "serverless_task_group" -- matrix_name: "data-lake-spec-tests" +- matrix_name: "serverless_proxy" matrix_spec: platform: rhel8 - python-version: ["3.7", "3.10"] + python-version: ["3.8", "3.10"] + auth-ssl: auth-ssl + serverless: "proxy" + display_name: "${serverless} ${python-version} ${platform}" + tasks: + - "serverless_task_group" + +- matrix_name: 
"data-lake-spec-tests" + matrix_spec: + platform: ubuntu-22.04 + python-version: ["3.8", "3.10"] auth: "auth" c-extensions: "*" display_name: "Atlas Data Lake ${python-version} ${c-extensions}" @@ -2987,7 +3038,7 @@ buildvariants: - matrix_name: "stable-api-tests" matrix_spec: platform: rhel8 - python-version: ["3.7", "3.10"] + python-version: ["3.8", "3.10"] auth: "auth" versionedApi: "*" display_name: "Versioned API ${versionedApi} ${python-version}" @@ -3000,8 +3051,8 @@ buildvariants: - matrix_name: "ocsp-test" matrix_spec: platform: rhel8 - python-version: ["3.7", "3.10", "pypy3.8", "pypy3.10"] - mongodb-version: ["4.4", "5.0", "6.0", "7.0", "latest"] + python-version: ["3.8", "3.10", "pypy3.9", "pypy3.10"] + mongodb-version: ["4.4", "5.0", "6.0", "7.0", "8.0", "latest"] auth: "noauth" ssl: "ssl" display_name: "OCSP test ${platform} ${python-version} ${mongodb-version}" @@ -3012,8 +3063,8 @@ buildvariants: - matrix_name: "ocsp-test-windows" matrix_spec: platform: windows-64-vsMulti-small - python-version-windows: ["3.7", "3.10"] - mongodb-version: ["4.4", "5.0", "6.0", "7.0", "latest"] + python-version-windows: ["3.8", "3.10"] + mongodb-version: ["4.4", "5.0", "6.0", "7.0", "8.0", "latest"] auth: "noauth" ssl: "ssl" display_name: "OCSP test ${platform} ${python-version-windows} ${mongodb-version}" @@ -3024,8 +3075,8 @@ buildvariants: - matrix_name: "ocsp-test-macos" matrix_spec: - platform: macos-1014 - mongodb-version: ["4.4", "5.0", "6.0", "7.0", "latest"] + platform: macos-1100 + mongodb-version: ["4.4", "5.0", "6.0", "7.0", "8.0", "latest"] auth: "noauth" ssl: "ssl" display_name: "OCSP test ${platform} ${mongodb-version}" @@ -3037,9 +3088,24 @@ buildvariants: - matrix_name: "oidc-auth-test" matrix_spec: platform: [ rhel8, macos-1100, windows-64-vsMulti-small ] - display_name: "MONGODB-OIDC Auth ${platform}" + display_name: "OIDC Auth ${platform}" + tasks: + - name: testoidc_task_group + batchtime: 20160 # 14 days + +- name: testazureoidc-variant + 
display_name: "OIDC Auth Azure" + run_on: ubuntu2204-small + tasks: + - name: testazureoidc_task_group + batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README + +- name: testgcpoidc-variant + display_name: "OIDC Auth GCP" + run_on: ubuntu2204-small tasks: - - name: "oidc-auth-test-latest" + - name: testgcpoidc_task_group + batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README - matrix_name: "aws-auth-test" matrix_spec: @@ -3051,18 +3117,20 @@ buildvariants: - name: "aws-auth-test-5.0" - name: "aws-auth-test-6.0" - name: "aws-auth-test-7.0" + - name: "aws-auth-test-8.0" - name: "aws-auth-test-rapid" - name: "aws-auth-test-latest" - matrix_name: "aws-auth-test-mac" matrix_spec: - platform: [macos-1014] + platform: [macos-1100] display_name: "MONGODB-AWS Auth ${platform} ${python-version-mac}" tasks: - name: "aws-auth-test-4.4" - name: "aws-auth-test-5.0" - name: "aws-auth-test-6.0" - name: "aws-auth-test-7.0" + - name: "aws-auth-test-8.0" - name: "aws-auth-test-rapid" - name: "aws-auth-test-latest" @@ -3076,13 +3144,14 @@ buildvariants: - name: "aws-auth-test-5.0" - name: "aws-auth-test-6.0" - name: "aws-auth-test-7.0" + - name: "aws-auth-test-8.0" - name: "aws-auth-test-rapid" - name: "aws-auth-test-latest" - matrix_name: "load-balancer" matrix_spec: platform: rhel8 - mongodb-version: ["6.0", "7.0", "rapid", "latest"] + mongodb-version: ["6.0", "7.0", "8.0", "rapid", "latest"] auth-ssl: "*" python-version: "*" loadbalancer: "*" @@ -3113,6 +3182,18 @@ buildvariants: tasks: - name: test_aws_lambda_task_group +- name: rhel8-pr-assign-reviewer + display_name: Assign PR Reviewer + run_on: rhel87-small + tasks: + - name: "assign-pr-reviewer" + +- name: rhel8-import-time + display_name: Import Time Check + run_on: rhel87-small + tasks: + - name: "check-import-time" + - name: Release display_name: Release batchtime: 20160 # 14 days @@ -3120,6 +3201,14 @@ buildvariants: tasks: - ".release_tag" +- name: "perf-tests" 
+ display_name: "Performance Benchmark Tests" + batchtime: 10080 # 7 days + run_on: rhel90-dbx-perf-large + tasks: + - name: "perf-6.0-standalone" + - name: "perf-6.0-standalone-ssl" + # Platform notes # i386 builds of OpenSSL or Cyrus SASL are not available # Debian 8.1 only supports MongoDB 3.4+ diff --git a/.evergreen/perf.yml b/.evergreen/perf.yml deleted file mode 100644 index 43b21a65fb..0000000000 --- a/.evergreen/perf.yml +++ /dev/null @@ -1,244 +0,0 @@ -######################################## -# Evergreen Template for MongoDB Drivers -######################################## - -# When a task that used to pass starts to fail -# Go through all versions that may have been skipped to detect -# when the task started failing -stepback: true - -# Mark a failure as a system/bootstrap failure (purple box) rather then a task -# failure by default. -# Actual testing tasks are marked with `type: test` -command_type: system - -# Protect ourself against rogue test case, or curl gone wild, that runs forever -# Good rule of thumb: the averageish length a task takes, times 5 -# That roughly accounts for variable system performance for various buildvariants -exec_timeout_secs: 3600 # 60 minutes is the longest we'll ever run - -# What to do when evergreen hits the timeout (`post:` tasks are run automatically) -timeout: - - command: shell.exec - params: - script: | - ls -la - -functions: - "fetch source": - # Executes git clone and applies the submitted patch, if any - - command: git.get_project - params: - directory: "src" - # Applies the subitted patch, if any - # Deprecated. Should be removed. 
But still needed for certain agents (ZAP) - - command: git.apply_patch - # Make an evergreen exapanstion file with dynamic values - - command: shell.exec - params: - working_dir: "src" - script: | - # Get the current unique version of this checkout - if [ "${is_patch}" = "true" ]; then - CURRENT_VERSION=$(git describe)-patch-${version_id} - else - CURRENT_VERSION=latest - fi - - export DRIVERS_TOOLS="$(pwd)/../drivers-tools" - export PROJECT_DIRECTORY="$(pwd)" - - # Python has cygwin path problems on Windows. Detect prospective mongo-orchestration home directory - if [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin - export DRIVERS_TOOLS=$(cygpath -m $DRIVERS_TOOLS) - export PROJECT_DIRECTORY=$(cygpath -m $PROJECT_DIRECTORY) - fi - - export MONGO_ORCHESTRATION_HOME="$DRIVERS_TOOLS/.evergreen/orchestration" - export MONGODB_BINARIES="$DRIVERS_TOOLS/mongodb/bin" - export UPLOAD_BUCKET="${project}" - - cat < expansion.yml - CURRENT_VERSION: "$CURRENT_VERSION" - DRIVERS_TOOLS: "$DRIVERS_TOOLS" - MONGO_ORCHESTRATION_HOME: "$MONGO_ORCHESTRATION_HOME" - MONGODB_BINARIES: "$MONGODB_BINARIES" - UPLOAD_BUCKET: "$UPLOAD_BUCKET" - PROJECT_DIRECTORY: "$PROJECT_DIRECTORY" - PREPARE_SHELL: | - set -o errexit - set -o xtrace - export DRIVERS_TOOLS="$DRIVERS_TOOLS" - export MONGO_ORCHESTRATION_HOME="$MONGO_ORCHESTRATION_HOME" - export MONGODB_BINARIES="$MONGODB_BINARIES" - export UPLOAD_BUCKET="$UPLOAD_BUCKET" - export PROJECT_DIRECTORY="$PROJECT_DIRECTORY" - - export TMPDIR="$MONGO_ORCHESTRATION_HOME/db" - export PATH="$MONGODB_BINARIES:$PATH" - export PROJECT="${project}" - EOT - # See what we've done - cat expansion.yml - - # Load the expansion file to make an evergreen variable with the current unique version - - command: expansions.update - params: - file: src/expansion.yml - - "prepare resources": - - command: shell.exec - params: - script: | - ${PREPARE_SHELL} - rm -rf $DRIVERS_TOOLS - if [ "${project}" = "drivers-tools" ]; then - # If this was a patch build, doing 
a fresh clone would not actually test the patch - cp -R ${PROJECT_DIRECTORY}/ $DRIVERS_TOOLS - else - git clone https://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS - fi - echo "{ \"releases\": { \"default\": \"$MONGODB_BINARIES\" }}" > $MONGO_ORCHESTRATION_HOME/orchestration.config - - "bootstrap mongo-orchestration": - - command: shell.exec - params: - script: | - ${PREPARE_SHELL} - MONGODB_VERSION=${VERSION} TOPOLOGY=${TOPOLOGY} AUTH=${AUTH} SSL=${SSL} STORAGE_ENGINE=${STORAGE_ENGINE} bash ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh - # run-orchestration generates expansion file with the MONGODB_URI for the cluster - - command: expansions.update - params: - file: mo-expansion.yml - - "stop mongo-orchestration": - - command: shell.exec - params: - script: | - ${PREPARE_SHELL} - bash ${DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh - - "run perf tests": - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - ${PREPARE_SHELL} - PROJECT_DIRECTORY=${PROJECT_DIRECTORY} bash ${PROJECT_DIRECTORY}/.evergreen/run-perf-tests.sh - - "attach benchmark test results": - - command: attach.results - params: - file_location: src/report.json - - "send dashboard data": - - command: perf.send - params: - file: src/results.json - - "cleanup": - - command: shell.exec - params: - script: | - ${PREPARE_SHELL} - rm -rf $DRIVERS_TOOLS || true - - "fix absolute paths": - - command: shell.exec - params: - script: | - ${PREPARE_SHELL} - for filename in $(find ${DRIVERS_TOOLS} -name \*.json); do - perl -p -i -e "s|ABSOLUTE_PATH_REPLACEMENT_TOKEN|${DRIVERS_TOOLS}|g" $filename - done - - "windows fix": - - command: shell.exec - params: - script: | - ${PREPARE_SHELL} - for i in $(find ${DRIVERS_TOOLS}/.evergreen ${PROJECT_DIRECTORY}/.evergreen -name \*.sh); do - cat $i | tr -d '\r' > $i.new - mv $i.new $i - done - # Copy client certificate because symlinks do not work on Windows. 
- cp ${DRIVERS_TOOLS}/.evergreen/x509gen/client.pem ${MONGO_ORCHESTRATION_HOME}/lib/client.pem - - "make files executable": - - command: shell.exec - params: - script: | - ${PREPARE_SHELL} - for i in $(find ${DRIVERS_TOOLS}/.evergreen ${PROJECT_DIRECTORY}/.evergreen -name \*.sh); do - chmod +x $i - done - - "install dependencies": - - command: shell.exec - params: - working_dir: "src" - script: | - ${PREPARE_SHELL} - file="${PROJECT_DIRECTORY}/.evergreen/install-dependencies.sh" - # Don't use ${file} syntax here because evergreen treats it as an empty expansion. - [ -f "$file" ] && bash $file || echo "$file not available, skipping" - -pre: - - func: "fetch source" - - func: "prepare resources" - # We don't run perf on Windows (yet) - #- func: "windows fix" - - func: "fix absolute paths" - - func: "make files executable" - # We're not testing with TLS (yet) - #- func: "install dependencies" - -post: - - func: "stop mongo-orchestration" - - func: "cleanup" - -tasks: - - name: "perf-4.0-standalone" - tags: ["perf"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.0" - TOPOLOGY: "server" - - func: "run perf tests" - - func: "attach benchmark test results" - - func: "send dashboard data" - - - name: "perf-4.4-standalone" - tags: ["perf"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.4" - TOPOLOGY: "server" - - func: "run perf tests" - - func: "attach benchmark test results" - - func: "send dashboard data" - - - name: "perf-6.0-standalone" - tags: ["perf"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "6.0" - TOPOLOGY: "server" - - func: "run perf tests" - - func: "attach benchmark test results" - - func: "send dashboard data" - -buildvariants: - -- name: "perf-tests" - display_name: "Performance Benchmark Tests" - batchtime: 10080 # 7 days - run_on: ubuntu2004-large - tasks: - - name: "perf-4.0-standalone" - - name: "perf-4.4-standalone" - - name: "perf-6.0-standalone" diff --git 
a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index f9ac89e947..7271e8d461 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -39,7 +39,7 @@ while getopts 'b:c:s:' flag; do done shift $((OPTIND-1)) -if [ -z $BRANCH ] +if [ -n "$BRANCH" ] then git -C $SPECS checkout $BRANCH fi @@ -98,13 +98,27 @@ do cpjson client-side-encryption/limits/ client-side-encryption/limits cpjson client-side-encryption/etc/data client-side-encryption/etc/data ;; + connection-monitoring|connection_monitoring) + cpjson connection-monitoring-and-pooling/tests/cmap-format connection_monitoring + ;; + connection-logging|connection_logging) + cpjson connection-monitoring-and-pooling/tests/logging connection_logging + ;; cmap|CMAP|connection-monitoring-and-pooling) - cpjson connection-monitoring-and-pooling/tests cmap - rm $PYMONGO/test/cmap/wait-queue-fairness.json # PYTHON-1873 + cpjson connection-monitoring-and-pooling/tests/logging connection_logging + cpjson connection-monitoring-and-pooling/tests/cmap-format connection_monitoring + rm $PYMONGO/test/connection_monitoring/wait-queue-fairness.json # PYTHON-1873 ;; apm|APM|command-monitoring|command_monitoring) cpjson command-logging-and-monitoring/tests/monitoring command_monitoring ;; + command-logging|command_logging) + cpjson command-logging-and-monitoring/tests/logging command_logging + ;; + clam|CLAM|command-logging-and-monitoring|command_logging_and_monitoring) + cpjson command-logging-and-monitoring/tests/logging command_logging + cpjson command-logging-and-monitoring/tests/monitoring command_monitoring + ;; crud|CRUD) cpjson crud/tests/ crud ;; @@ -151,6 +165,11 @@ do ;; server-selection|server_selection) cpjson server-selection/tests/ server_selection + rm -rf $PYMONGO/test/server_selection/logging + cpjson server-selection/tests/logging server_selection_logging + ;; + server-selection-logging|server_selection_logging) + cpjson server-selection/tests/logging server_selection_logging ;; sessions) 
cpjson sessions/tests/ sessions diff --git a/.evergreen/run-azurekms-fail-test.sh b/.evergreen/run-azurekms-fail-test.sh new file mode 100644 index 0000000000..13b34d01e2 --- /dev/null +++ b/.evergreen/run-azurekms-fail-test.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -o errexit # Exit the script with error if any of the commands fail + +. $DRIVERS_TOOLS/.evergreen/csfle/azurekms/setup-secrets.sh +PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 \ + KEY_NAME="${AZUREKMS_KEYNAME}" \ + KEY_VAULT_ENDPOINT="${AZUREKMS_KEYVAULTENDPOINT}" \ + LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz \ + SUCCESS=false TEST_FLE_AZURE_AUTO=1 \ + ./.evergreen/tox.sh -m test-eg diff --git a/.evergreen/run-azurekms-test.sh b/.evergreen/run-azurekms-test.sh new file mode 100644 index 0000000000..d8fb3449f4 --- /dev/null +++ b/.evergreen/run-azurekms-test.sh @@ -0,0 +1,21 @@ +#!/bin/bash +set -o errexit # Exit the script with error if any of the commands fail + +source ${DRIVERS_TOOLS}/.evergreen/csfle/azurekms/secrets-export.sh +echo "Copying files ... begin" +export AZUREKMS_RESOURCEGROUP=${AZUREKMS_RESOURCEGROUP} +export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} +export AZUREKMS_PRIVATEKEYPATH=/tmp/testazurekms_privatekey +tar czf /tmp/mongo-python-driver.tgz . +# shellcheck disable=SC2088 +AZUREKMS_SRC="/tmp/mongo-python-driver.tgz" AZUREKMS_DST="~/" \ + $DRIVERS_TOOLS/.evergreen/csfle/azurekms/copy-file.sh +echo "Copying files ... end" +echo "Untarring file ... begin" +AZUREKMS_CMD="tar xf mongo-python-driver.tgz" \ + $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh +echo "Untarring file ... end" +echo "Running test ... 
begin" +AZUREKMS_CMD="KEY_NAME=\"$AZUREKMS_KEYNAME\" KEY_VAULT_ENDPOINT=\"$AZUREKMS_KEYVAULTENDPOINT\" LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz SUCCESS=true TEST_FLE_AZURE_AUTO=1 ./.evergreen/tox.sh -m test-eg" \ + $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh +echo "Running test ... end" diff --git a/.evergreen/run-gcpkms-test.sh b/.evergreen/run-gcpkms-test.sh new file mode 100644 index 0000000000..221100de8a --- /dev/null +++ b/.evergreen/run-gcpkms-test.sh @@ -0,0 +1,18 @@ +#!/bin/bash +set -o errexit # Exit the script with error if any of the commands fail + +source ${DRIVERS_TOOLS}/.evergreen/csfle/gcpkms/secrets-export.sh +echo "Copying files ... begin" +export GCPKMS_GCLOUD=${GCPKMS_GCLOUD} +export GCPKMS_PROJECT=${GCPKMS_PROJECT} +export GCPKMS_ZONE=${GCPKMS_ZONE} +export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} +tar czf /tmp/mongo-python-driver.tgz . +GCPKMS_SRC=/tmp/mongo-python-driver.tgz GCPKMS_DST=$GCPKMS_INSTANCENAME: $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/copy-file.sh +echo "Copying files ... end" +echo "Untarring file ... begin" +GCPKMS_CMD="tar xf mongo-python-driver.tgz" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh +echo "Untarring file ... end" +echo "Running test ... begin" +GCPKMS_CMD="SUCCESS=true TEST_FLE_GCP_AUTO=1 LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz ./.evergreen/tox.sh -m test-eg" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh +echo "Running test ... end" diff --git a/.evergreen/run-import-time-test.sh b/.evergreen/run-import-time-test.sh new file mode 100755 index 0000000000..2b17f5ffeb --- /dev/null +++ b/.evergreen/run-import-time-test.sh @@ -0,0 +1,33 @@ +#!/bin/bash -ex + +set -o errexit # Exit the script with error if any of the commands fail +set -x + +. 
.evergreen/utils.sh + +if [ -z "$PYTHON_BINARY" ]; then + PYTHON_BINARY=$(find_python3) +fi + +# Use the previous commit if this was not a PR run. +if [ "$BASE_SHA" == "$HEAD_SHA" ]; then + BASE_SHA=$(git rev-parse HEAD~1) +fi + +function get_import_time() { + local log_file + createvirtualenv "$PYTHON_BINARY" import-venv + python -m pip install -q ".[aws,encryption,gssapi,ocsp,snappy,zstd]" + # Import once to cache modules + python -c "import pymongo" + log_file="pymongo-$1.log" + python -X importtime -c "import pymongo" 2> $log_file +} + +get_import_time $HEAD_SHA +git stash +git checkout $BASE_SHA +get_import_time $BASE_SHA +git checkout $HEAD_SHA +git stash apply +python tools/compare_import_time.py $HEAD_SHA $BASE_SHA diff --git a/.evergreen/run-mod-wsgi-tests.sh b/.evergreen/run-mod-wsgi-tests.sh index afb3f271ae..e1f5238110 100644 --- a/.evergreen/run-mod-wsgi-tests.sh +++ b/.evergreen/run-mod-wsgi-tests.sh @@ -19,7 +19,10 @@ fi PYTHON_VERSION=$(${PYTHON_BINARY} -c "import sys; sys.stdout.write('.'.join(str(val) for val in sys.version_info[:2]))") # Ensure the C extensions are installed. -${PYTHON_BINARY} setup.py build_ext -i +${PYTHON_BINARY} -m venv --system-site-packages .venv +source .venv/bin/activate +pip install -U pip +python -m pip install -e . 
export MOD_WSGI_SO=/opt/python/mod_wsgi/python_version/$PYTHON_VERSION/mod_wsgi_version/$MOD_WSGI_VERSION/mod_wsgi.so export PYTHONHOME=/opt/python/$PYTHON_VERSION @@ -38,10 +41,12 @@ trap '$APACHE -k stop -f ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${APACHE_CONFIG wget -t 1 -T 10 -O - "http://localhost:8080/interpreter1${PROJECT_DIRECTORY}" || (cat error_log && exit 1) wget -t 1 -T 10 -O - "http://localhost:8080/interpreter2${PROJECT_DIRECTORY}" || (cat error_log && exit 1) -${PYTHON_BINARY} ${PROJECT_DIRECTORY}/test/mod_wsgi_test/test_client.py -n 25000 -t 100 parallel \ +python ${PROJECT_DIRECTORY}/test/mod_wsgi_test/test_client.py -n 25000 -t 100 parallel \ http://localhost:8080/interpreter1${PROJECT_DIRECTORY} http://localhost:8080/interpreter2${PROJECT_DIRECTORY} || \ (tail -n 100 error_log && exit 1) -${PYTHON_BINARY} ${PROJECT_DIRECTORY}/test/mod_wsgi_test/test_client.py -n 25000 serial \ +python ${PROJECT_DIRECTORY}/test/mod_wsgi_test/test_client.py -n 25000 serial \ http://localhost:8080/interpreter1${PROJECT_DIRECTORY} http://localhost:8080/interpreter2${PROJECT_DIRECTORY} || \ (tail -n 100 error_log && exit 1) + +rm -rf .venv diff --git a/.evergreen/run-mongodb-oidc-test.sh b/.evergreen/run-mongodb-oidc-test.sh index 75fafb448b..89a2119308 100755 --- a/.evergreen/run-mongodb-oidc-test.sh +++ b/.evergreen/run-mongodb-oidc-test.sh @@ -1,48 +1,32 @@ #!/bin/bash set +x # Disable debug trace -set -o errexit # Exit the script with error if any of the commands fail +set -eu echo "Running MONGODB-OIDC authentication tests" -# Make sure DRIVERS_TOOLS is set. -if [ -z "$DRIVERS_TOOLS" ]; then - echo "Must specify DRIVERS_TOOLS" - exit 1 -fi +OIDC_ENV=${OIDC_ENV:-"test"} -# Get the drivers secrets. Use an existing secrets file first. -if [ ! -f "./secrets-export.sh" ]; then - bash ${DRIVERS_TOOLS}/.evergreen/auth_aws/setup_secrets.sh drivers/oidc -fi -source ./secrets-export.sh +if [ $OIDC_ENV == "test" ]; then + # Make sure DRIVERS_TOOLS is set. 
+ if [ -z "$DRIVERS_TOOLS" ]; then + echo "Must specify DRIVERS_TOOLS" + exit 1 + fi + source ${DRIVERS_TOOLS}/.evergreen/auth_oidc/secrets-export.sh -# # If the file did not have our creds, get them from the vault. -if [ -z "$OIDC_ATLAS_URI_SINGLE" ]; then - bash ${DRIVERS_TOOLS}/.evergreen/auth_aws/setup_secrets.sh drivers/oidc +elif [ $OIDC_ENV == "azure" ]; then + source ./env.sh + +elif [ $OIDC_ENV == "gcp" ]; then source ./secrets-export.sh -fi -# Make the OIDC tokens. -set -x -pushd ${DRIVERS_TOOLS}/.evergreen/auth_oidc -. ./oidc_get_tokens.sh -popd - -# Set up variables and run the test. -if [ -n "$LOCAL_OIDC_SERVER" ]; then - export MONGODB_URI=${MONGODB_URI:-"mongodb://localhost"} - export MONGODB_URI_SINGLE="${MONGODB_URI}/?authMechanism=MONGODB-OIDC" - export MONGODB_URI_MULTI="${MONGODB_URI}:27018/?authMechanism=MONGODB-OIDC&directConnection=true" else - set +x # turn off xtrace for this portion - export MONGODB_URI="$OIDC_ATLAS_URI_SINGLE" - export MONGODB_URI_SINGLE="$OIDC_ATLAS_URI_SINGLE/?authMechanism=MONGODB-OIDC" - export MONGODB_URI_MULTI="$OIDC_ATLAS_URI_MULTI/?authMechanism=MONGODB-OIDC" - set -x + echo "Unrecognized OIDC_ENV $OIDC_ENV" + exit 1 fi export TEST_AUTH_OIDC=1 export COVERAGE=1 export AUTH="auth" -bash ./.evergreen/tox.sh -m test-eg +bash ./.evergreen/tox.sh -m test-eg -- "${@:1}" diff --git a/.evergreen/run-perf-tests.sh b/.evergreen/run-perf-tests.sh index 72be38e03d..9d6afacf7b 100644 --- a/.evergreen/run-perf-tests.sh +++ b/.evergreen/run-perf-tests.sh @@ -3,17 +3,17 @@ set -o xtrace set -o errexit -git clone https://github.com/mongodb-labs/driver-performance-test-data.git -cd driver-performance-test-data +git clone --depth 1 https://github.com/mongodb/specifications.git +pushd specifications/source/benchmarking/data tar xf extended_bson.tgz tar xf parallel.tgz tar xf single_and_multi_document.tgz -cd .. 
+popd -export TEST_PATH="${PROJECT_DIRECTORY}/driver-performance-test-data" +export TEST_PATH="${PROJECT_DIRECTORY}/specifications/source/benchmarking/data" export OUTPUT_FILE="${PROJECT_DIRECTORY}/results.json" -export PYTHON_BINARY=/opt/mongodbtoolchain/v3/bin/python3 +export PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 export PERF_TEST=1 bash ./.evergreen/tox.sh -m test-eg diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 8a31a96a3c..d47e3a9505 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -8,7 +8,6 @@ set -o xtrace # AUTH Set to enable authentication. Defaults to "noauth" # SSL Set to enable SSL. Defaults to "nossl" # GREEN_FRAMEWORK The green framework to test with, if any. -# C_EXTENSIONS Pass --no_ext to skip installing the C extensions. # COVERAGE If non-empty, run the test suite with coverage. # COMPRESSORS If non-empty, install appropriate compressor. # LIBMONGOCRYPT_URL The URL to download libmongocrypt. @@ -25,20 +24,25 @@ set -o xtrace # TEST_AUTH_OIDC If non-empty, test OIDC Auth Mechanism # TEST_PERF If non-empty, run performance tests # TEST_OCSP If non-empty, run OCSP tests +# TEST_ATLAS If non-empty, test Atlas connections +# TEST_INDEX_MANAGEMENT If non-empty, run index management tests # TEST_ENCRYPTION_PYOPENSSL If non-empy, test encryption with PyOpenSSL -# TEST_ATLAS If non-empty, test Atlas connections AUTH=${AUTH:-noauth} SSL=${SSL:-nossl} -TEST_ARGS="$1" -PYTHON=$(which python) +TEST_ARGS="${*:1}" + export PIP_QUIET=1 # Quiet by default +export PIP_PREFER_BINARY=1 # Prefer binary dists by default python -c "import sys; sys.exit(sys.prefix == sys.base_prefix)" || (echo "Not inside a virtual env!"; exit 1) -# Try to source exported AWS Secrets +# Try to source local Drivers Secrets if [ -f ./secrets-export.sh ]; then + echo "Sourcing secrets" source ./secrets-export.sh +else + echo "Not sourcing secrets" fi if [ "$AUTH" != "noauth" ]; then @@ -47,15 +51,22 @@ if [ "$AUTH" != "noauth" ]; then 
export DB_USER="mhuser" export DB_PASSWORD="pencil" elif [ ! -z "$TEST_SERVERLESS" ]; then + source ${DRIVERS_TOOLS}/.evergreen/serverless/secrets-export.sh export DB_USER=$SERVERLESS_ATLAS_USER export DB_PASSWORD=$SERVERLESS_ATLAS_PASSWORD + export MONGODB_URI="$SERVERLESS_URI" + echo "MONGODB_URI=$MONGODB_URI" + export SINGLE_MONGOS_LB_URI=$MONGODB_URI + export MULTI_MONGOS_LB_URI=$MONGODB_URI elif [ ! -z "$TEST_AUTH_OIDC" ]; then - export DB_USER=$OIDC_ALTAS_USER - export DB_PASSWORD=$OIDC_ATLAS_PASSWORD + export DB_USER=$OIDC_ADMIN_USER + export DB_PASSWORD=$OIDC_ADMIN_PWD + export DB_IP="$MONGODB_URI" else export DB_USER="bob" export DB_PASSWORD="pwd123" fi + echo "Added auth, DB_USER: $DB_USER" set -x fi @@ -99,7 +110,6 @@ fi if [ "$COMPRESSORS" = "snappy" ]; then python -m pip install '.[snappy]' - PYTHON=python elif [ "$COMPRESSORS" = "zstd" ]; then python -m pip install zstandard fi @@ -111,32 +121,13 @@ fi if [ -n "$TEST_ENCRYPTION" ] || [ -n "$TEST_FLE_AZURE_AUTO" ] || [ -n "$TEST_FLE_GCP_AUTO" ]; then - # Work around for root certifi not being installed. - # TODO: Remove after PYTHON-3952 is deployed. - if [ "$(uname -s)" = "Darwin" ]; then - python -m pip install certifi - CERT_PATH=$(python -c "import certifi; print(certifi.where())") - export SSL_CERT_FILE=${CERT_PATH} - export REQUESTS_CA_BUNDLE=${CERT_PATH} - export AWS_CA_BUNDLE=${CERT_PATH} - fi - python -m pip install '.[encryption]' - if [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin - # PYTHON-2808 Ensure this machine has the CA cert for google KMS. - powershell.exe "Invoke-WebRequest -URI https://oauth2.googleapis.com/" > /dev/null || true + # Install libmongocrypt if necessary. + if [ ! -d "libmongocrypt" ]; then + bash ./.evergreen/setup-libmongocrypt.sh fi - if [ -z "$LIBMONGOCRYPT_URL" ]; then - echo "Cannot test client side encryption without LIBMONGOCRYPT_URL!" 
- exit 1 - fi - curl -O "$LIBMONGOCRYPT_URL" - mkdir libmongocrypt - tar xzf libmongocrypt.tar.gz -C ./libmongocrypt - ls -la libmongocrypt - ls -la libmongocrypt/nocrypto # Use the nocrypto build to avoid dependency issues with older windows/python versions. BASE=$(pwd)/libmongocrypt/nocrypto if [ -f "${BASE}/lib/libmongocrypt.so" ]; then @@ -157,8 +148,10 @@ if [ -n "$TEST_ENCRYPTION" ] || [ -n "$TEST_FLE_AZURE_AUTO" ] || [ -n "$TEST_FLE export PYMONGOCRYPT_LIB # TODO: Test with 'pip install pymongocrypt' - git clone https://github.com/mongodb/libmongocrypt.git libmongocrypt_git - python -m pip install --prefer-binary -r .evergreen/test-encryption-requirements.txt + if [ ! -d "libmongocrypt_git" ]; then + git clone https://github.com/mongodb/libmongocrypt.git libmongocrypt_git + fi + python -m pip install -U setuptools python -m pip install ./libmongocrypt_git/bindings/python python -c "import pymongocrypt; print('pymongocrypt version: '+pymongocrypt.__version__)" python -c "import pymongocrypt; print('libmongocrypt version: '+pymongocrypt.libmongocrypt_version())" @@ -170,10 +163,6 @@ if [ -n "$TEST_ENCRYPTION" ]; then python -m pip install '.[ocsp]' fi - # Get access to the AWS temporary credentials: - # CSFLE_AWS_TEMP_ACCESS_KEY_ID, CSFLE_AWS_TEMP_SECRET_ACCESS_KEY, CSFLE_AWS_TEMP_SESSION_TOKEN - . 
$DRIVERS_TOOLS/.evergreen/csfle/set-temp-creds.sh - if [ -n "$TEST_CRYPT_SHARED" ]; then CRYPT_SHARED_DIR=`dirname $CRYPT_SHARED_LIB_PATH` echo "using crypt_shared_dir $CRYPT_SHARED_DIR" @@ -204,6 +193,11 @@ if [ -n "$TEST_FLE_AZURE_AUTO" ] || [ -n "$TEST_FLE_GCP_AUTO" ]; then fi if [ -n "$TEST_INDEX_MANAGEMENT" ]; then + source $DRIVERS_TOOLS/.evergreen/atlas/secrets-export.sh + export DB_USER="${DRIVERS_ATLAS_LAMBDA_USER}" + set +x + export DB_PASSWORD="${DRIVERS_ATLAS_LAMBDA_PASSWORD}" + set -x TEST_ARGS="test/test_index_management.py" fi @@ -227,18 +221,7 @@ fi if [ -n "$TEST_AUTH_OIDC" ]; then python -m pip install ".[aws]" - - # Work around for root certifi not being installed. - # TODO: Remove after PYTHON-3952 is deployed. - if [ "$(uname -s)" = "Darwin" ]; then - python -m pip install certifi - CERT_PATH=$(python -c "import certifi; print(certifi.where())") - export SSL_CERT_FILE=${CERT_PATH} - export REQUESTS_CA_BUNDLE=${CERT_PATH} - export AWS_CA_BUNDLE=${CERT_PATH} - fi - - TEST_ARGS="test/auth_oidc/test_auth_oidc.py" + TEST_ARGS="test/auth_oidc/test_auth_oidc.py $TEST_ARGS" fi if [ -n "$PERF_TEST" ]; then @@ -247,7 +230,7 @@ if [ -n "$PERF_TEST" ]; then TEST_ARGS="test/performance/perf_test.py" fi -echo "Running $AUTH tests over $SSL with python $PYTHON" +echo "Running $AUTH tests over $SSL with python $(which python)" python -c 'import sys; print(sys.version)' @@ -256,24 +239,25 @@ python -c 'import sys; print(sys.version)' # Run the tests with coverage if requested and coverage is installed. # Only cover CPython. PyPy reports suspiciously low coverage. -PYTHON_IMPL=$($PYTHON -c "import platform; print(platform.python_implementation())") +PYTHON_IMPL=$(python -c "import platform; print(platform.python_implementation())") if [ -n "$COVERAGE" ] && [ "$PYTHON_IMPL" = "CPython" ]; then - # coverage 7.3 dropped support for Python 3.7, keep in sync with combine-coverage.sh. + # Keep in sync with combine-coverage.sh. 
# coverage >=5 is needed for relative_files=true. - python -m pip install pytest-cov "coverage>=5,<7.3" + python -m pip install pytest-cov "coverage>=5,<=7.5" TEST_ARGS="$TEST_ARGS --cov" fi if [ -n "$GREEN_FRAMEWORK" ]; then - python -m pip install $GREEN_FRAMEWORK + python -m pip install $GREEN_FRAMEWORK fi # Show the installed packages PIP_QUIET=0 python -m pip list if [ -z "$GREEN_FRAMEWORK" ]; then - .evergreen/check-c-extensions.sh - python -m pytest -v --durations=5 --maxfail=10 $TEST_ARGS + # Use --capture=tee-sys so pytest prints test output inline: + # https://docs.pytest.org/en/stable/how-to/capture-stdout-stderr.html + python -m pytest -v --capture=tee-sys --durations=5 --maxfail=10 $TEST_ARGS else python green_framework_test.py $GREEN_FRAMEWORK -v $TEST_ARGS fi diff --git a/.evergreen/setup-libmongocrypt.sh b/.evergreen/setup-libmongocrypt.sh new file mode 100644 index 0000000000..99ca6ebd3b --- /dev/null +++ b/.evergreen/setup-libmongocrypt.sh @@ -0,0 +1,47 @@ +#!/bin/bash +set -o errexit # Exit the script with error if any of the commands fail +set -o xtrace + +TARGET="" + +if [ "Windows_NT" = "${OS:-''}" ]; then # Magic variable in cygwin + # PYTHON-2808 Ensure this machine has the CA cert for google KMS. 
+ powershell.exe "Invoke-WebRequest -URI https://oauth2.googleapis.com/" > /dev/null || true + TARGET="windows-test" +fi + +if [ "$(uname -s)" = "Darwin" ]; then + TARGET="macos" +fi + +if [ "$(uname -s)" = "Linux" ]; then + rhel_ver=$(awk -F'=' '/VERSION_ID/{ gsub(/"/,""); print $2}' /etc/os-release) + arch=$(uname -m) + echo "RHEL $rhel_ver $arch" + if [[ $rhel_ver =~ 7 ]]; then + TARGET="rhel-70-64-bit" + elif [[ $rhel_ver =~ 8 ]]; then + if [ "$arch" = "x86_64" ]; then + TARGET="rhel-80-64-bit" + elif [ "$arch" = "arm" ]; then + TARGET="rhel-82-arm64" + fi + fi +fi + +if [ -z "$LIBMONGOCRYPT_URL" ] && [ -n "$TARGET" ]; then + LIBMONGOCRYPT_URL="https://s3.amazonaws.com/mciuploads/libmongocrypt/$TARGET/master/latest/libmongocrypt.tar.gz" +fi + +if [ -z "$LIBMONGOCRYPT_URL" ]; then + echo "Cannot test client side encryption without LIBMONGOCRYPT_URL!" + exit 1 +fi +rm -rf libmongocrypt libmongocrypt.tar.gz +echo "Fetching $LIBMONGOCRYPT_URL..." +curl -O "$LIBMONGOCRYPT_URL" +echo "Fetching $LIBMONGOCRYPT_URL...done" +mkdir libmongocrypt +tar xzf libmongocrypt.tar.gz -C ./libmongocrypt +ls -la libmongocrypt +ls -la libmongocrypt/nocrypto diff --git a/.evergreen/test-encryption-requirements.txt b/.evergreen/test-encryption-requirements.txt deleted file mode 100644 index 13ed7ebb15..0000000000 --- a/.evergreen/test-encryption-requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -cffi>=1.12.0,<2 -# boto3 is required by drivers-evergreen-tools/.evergreen/csfle/set-temp-creds.sh -boto3<2 diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index 35005c0d6a..f0a5851d91 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -4,8 +4,8 @@ set -o xtrace find_python3() { PYTHON="" - # Add a fallback system python3 if it is available and Python 3.7+. - if is_python_37 "$(command -v python3)"; then + # Add a fallback system python3 if it is available and Python 3.8+. 
+ if is_python_38 "$(command -v python3)"; then PYTHON="$(command -v python3)" fi # Find a suitable toolchain version, if available. @@ -14,23 +14,23 @@ find_python3() { if [ -d "/Library/Frameworks/Python.Framework/Versions/3.10" ]; then PYTHON="/Library/Frameworks/Python.Framework/Versions/3.10/bin/python3" # macos 10.14 - elif [ -d "/Library/Frameworks/Python.Framework/Versions/3.7" ]; then - PYTHON="/Library/Frameworks/Python.Framework/Versions/3.7/bin/python3" + elif [ -d "/Library/Frameworks/Python.Framework/Versions/3.8" ]; then + PYTHON="/Library/Frameworks/Python.Framework/Versions/3.8/bin/python3" fi elif [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin - PYTHON="C:/python/Python37/python.exe" + PYTHON="C:/python/Python38/python.exe" else - # Prefer our own toolchain, fall back to mongodb toolchain if it has Python 3.7+. - if [ -f "/opt/python/3.7/bin/python3" ]; then - PYTHON="/opt/python/3.7/bin/python3" - elif is_python_37 "$(command -v /opt/mongodbtoolchain/v4/bin/python3)"; then + # Prefer our own toolchain, fall back to mongodb toolchain if it has Python 3.8+. + if [ -f "/opt/python/3.8/bin/python3" ]; then + PYTHON="/opt/python/3.8/bin/python3" + elif is_python_38 "$(command -v /opt/mongodbtoolchain/v4/bin/python3)"; then PYTHON="/opt/mongodbtoolchain/v4/bin/python3" - elif is_python_37 "$(command -v /opt/mongodbtoolchain/v3/bin/python3)"; then + elif is_python_38 "$(command -v /opt/mongodbtoolchain/v3/bin/python3)"; then PYTHON="/opt/mongodbtoolchain/v3/bin/python3" fi fi if [ -z "$PYTHON" ]; then - echo "Cannot test without python3.7+ installed!" + echo "Cannot test without python3.8+ installed!" 
exit 1 fi echo "$PYTHON" @@ -66,7 +66,7 @@ createvirtualenv () { export PIP_QUIET=1 python -m pip install --upgrade pip - python -m pip install --upgrade setuptools tox + python -m pip install --upgrade tox } # Usage: @@ -96,15 +96,15 @@ testinstall () { fi } -# Function that returns success if the provided Python binary is version 3.7 or later +# Function that returns success if the provided Python binary is version 3.8 or later # Usage: -# is_python_37 /path/to/python +# is_python_38 /path/to/python # * param1: Python binary -is_python_37() { +is_python_38() { if [ -z "$1" ]; then return 1 - elif $1 -c "import sys; exit(sys.version_info[:2] < (3, 7))"; then - # runs when sys.version_info[:2] >= (3, 7) + elif $1 -c "import sys; exit(sys.version_info[:2] < (3, 8))"; then + # runs when sys.version_info[:2] >= (3, 8) return 0 else return 1 diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index a9d726b96b..0000000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1,2 +0,0 @@ -# Global owner for repo -* @mongodb/dbx-python diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..5bf500ba12 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,16 @@ +version: 2 +updates: + # GitHub Actions + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + groups: + actions: + patterns: + - "*" + # Python + - package-ecosystem: "pip" + directory: "/" + schedule: + interval: "weekly" diff --git a/.github/reviewers.txt b/.github/reviewers.txt new file mode 100644 index 0000000000..9e38ee71b5 --- /dev/null +++ b/.github/reviewers.txt @@ -0,0 +1,5 @@ +# List of reviewers for auto-assignment of reviews. 
+caseyclements +blink1073 +Jibola +NoahStapp diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 0000000000..370b8759e6 --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,66 @@ +name: "CodeQL" + +on: + push: + branches: [ "master", "v*"] + tags: ['*'] + pull_request: + workflow_call: + inputs: + ref: + required: true + type: string + schedule: + - cron: '17 10 * * 2' + +concurrency: + group: codeql-${{ github.ref }} + cancel-in-progress: true + +jobs: + analyze: + name: Analyze (${{ matrix.language }}) + runs-on: "ubuntu-latest" + timeout-minutes: 360 + permissions: + # required for all workflows + security-events: write + + strategy: + fail-fast: false + matrix: + include: + - language: c-cpp + build-mode: manual + - language: python + build-mode: none + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + ref: ${{ inputs.ref }} + - uses: actions/setup-python@v3 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + build-mode: ${{ matrix.build-mode }} + # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + queries: security-extended + config: | + paths-ignore: + - '.github/**' + - 'doc/**' + - 'tools/**' + - 'test/**' + + - if: matrix.build-mode == 'manual' + run: | + pip install -e . 
+ + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml new file mode 100644 index 0000000000..7ec55dd3b3 --- /dev/null +++ b/.github/workflows/dist.yml @@ -0,0 +1,146 @@ +name: Python Dist + +on: + push: + tags: + - "[0-9]+.[0-9]+.[0-9]+" + - "[0-9]+.[0-9]+.[0-9]+.post[0-9]+" + - "[0-9]+.[0-9]+.[0-9]+[a-b][0-9]+" + - "[0-9]+.[0-9]+.[0-9]+rc[0-9]+" + workflow_dispatch: + pull_request: + workflow_call: + inputs: + ref: + required: true + type: string + +concurrency: + group: dist-${{ github.ref }} + cancel-in-progress: true + +defaults: + run: + shell: bash -eux {0} + +jobs: + build_wheels: + name: Build wheels for ${{ matrix.buildplat[1] }} + runs-on: ${{ matrix.buildplat[0] }} + strategy: + # Ensure that a wheel builder finishes even if another fails + fail-fast: false + matrix: + # Github Actions doesn't support pairing matrix values together, let's improvise + # https://github.com/github/feedback/discussions/7835#discussioncomment-1769026 + buildplat: + - [ubuntu-20.04, "manylinux_x86_64", "cp3*-manylinux_x86_64"] + - [ubuntu-20.04, "manylinux_aarch64", "cp3*-manylinux_aarch64"] + - [ubuntu-20.04, "manylinux_ppc64le", "cp3*-manylinux_ppc64le"] + - [ubuntu-20.04, "manylinux_s390x", "cp3*-manylinux_s390x"] + - [ubuntu-20.04, "manylinux_i686", "cp3*-manylinux_i686"] + - [windows-2019, "win_amd6", "cp3*-win_amd64"] + - [windows-2019, "win32", "cp3*-win32"] + - [macos-14, "macos", "cp*-macosx_*"] + + steps: + - name: Checkout pymongo + uses: actions/checkout@v4 + with: + fetch-depth: 0 + ref: ${{ inputs.ref }} + + - uses: actions/setup-python@v5 + with: + cache: 'pip' + python-version: 3.8 + cache-dependency-path: 'pyproject.toml' + allow-prereleases: true + + - name: Set up QEMU + if: runner.os == 'Linux' + uses: docker/setup-qemu-action@v3 + with: + platforms: all + + - name: Install cibuildwheel + # Note: the default 
manylinux is manylinux2014 + run: | + python -m pip install -U pip + python -m pip install "cibuildwheel>=2.17,<3" + + - name: Build wheels + env: + CIBW_BUILD: ${{ matrix.buildplat[2] }} + run: python -m cibuildwheel --output-dir wheelhouse + + - name: Build manylinux1 wheels + if: ${{ matrix.buildplat[1] == 'manylinux_x86_64' || matrix.buildplat[1] == 'manylinux_i686' }} + env: + CIBW_MANYLINUX_X86_64_IMAGE: manylinux1 + CIBW_MANYLINUX_I686_IMAGE: manylinux1 + CIBW_BUILD: "cp38-${{ matrix.buildplat[1] }} cp39-${{ matrix.buildplat[1] }}" + run: python -m cibuildwheel --output-dir wheelhouse + + - name: Assert all versions in wheelhouse + if: ${{ ! startsWith(matrix.buildplat[1], 'macos') }} + run: | + ls wheelhouse/*cp38*.whl + ls wheelhouse/*cp39*.whl + ls wheelhouse/*cp310*.whl + ls wheelhouse/*cp311*.whl + ls wheelhouse/*cp312*.whl + + - uses: actions/upload-artifact@v4 + with: + name: wheel-${{ matrix.buildplat[1] }} + path: ./wheelhouse/*.whl + if-no-files-found: error + + make_sdist: + name: Make SDist + runs-on: macos-13 + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + ref: ${{ inputs.ref }} + + - uses: actions/setup-python@v5 + with: + # Build sdist on lowest supported Python + python-version: '3.8' + + - name: Build SDist + run: | + set -ex + python -m pip install -U pip build + python -m build --sdist . + + - name: Test SDist + run: | + python -m pip install dist/*.gz + cd .. + python -c "from pymongo import has_c; assert has_c()" + + - uses: actions/upload-artifact@v4 + with: + name: "sdist" + path: ./dist/*.tar.gz + + collect_dist: + runs-on: ubuntu-latest + needs: [build_wheels, make_sdist] + name: Download Wheels + steps: + - name: Download all workflow run artifacts + uses: actions/download-artifact@v4 + - name: Flatten directory + working-directory: . + run: | + find . -mindepth 2 -type f -exec mv {} . \; + find . 
-type d -empty -delete + - uses: actions/upload-artifact@v4 + with: + name: all-dist-${{ github.run_id }} + path: "./*" diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml new file mode 100644 index 0000000000..2ce2c50033 --- /dev/null +++ b/.github/workflows/release-python.yml @@ -0,0 +1,95 @@ +name: Release + +on: + workflow_dispatch: + inputs: + version: + description: "The new version to set" + required: true + following_version: + description: "The post (dev) version to set" + required: true + dry_run: + description: "Dry Run?" + default: false + type: boolean + +env: + # Changes per repo + PRODUCT_NAME: PyMongo + # Changes per branch + SILK_ASSET_GROUP: mongodb-python-driver + EVERGREEN_PROJECT: mongo-python-driver-v4.8 + +defaults: + run: + shell: bash -eux {0} + +jobs: + pre-publish: + environment: release + runs-on: ubuntu-latest + permissions: + id-token: write + contents: write + outputs: + version: ${{ steps.pre-publish.outputs.version }} + steps: + - uses: mongodb-labs/drivers-github-tools/secure-checkout@v2 + with: + app_id: ${{ vars.APP_ID }} + private_key: ${{ secrets.APP_PRIVATE_KEY }} + - uses: mongodb-labs/drivers-github-tools/setup@v2 + with: + aws_role_arn: ${{ secrets.AWS_ROLE_ARN }} + aws_region_name: ${{ vars.AWS_REGION_NAME }} + aws_secret_id: ${{ secrets.AWS_SECRET_ID }} + artifactory_username: ${{ vars.ARTIFACTORY_USERNAME }} + - uses: mongodb-labs/drivers-github-tools/python/pre-publish@v2 + id: pre-publish + with: + version: ${{ inputs.version }} + dry_run: ${{ inputs.dry_run }} + + build-dist: + needs: [pre-publish] + uses: ./.github/workflows/dist.yml + with: + ref: ${{ needs.pre-publish.outputs.version }} + + static-scan: + needs: [pre-publish] + permissions: + security-events: write + uses: ./.github/workflows/codeql.yml + with: + ref: ${{ needs.pre-publish.outputs.version }} + + publish: + needs: [build-dist, static-scan] + runs-on: ubuntu-latest + environment: release + permissions: + id-token: 
write + contents: write + security-events: write + steps: + - uses: mongodb-labs/drivers-github-tools/secure-checkout@v2 + with: + app_id: ${{ vars.APP_ID }} + private_key: ${{ secrets.APP_PRIVATE_KEY }} + - uses: mongodb-labs/drivers-github-tools/setup@v2 + with: + aws_role_arn: ${{ secrets.AWS_ROLE_ARN }} + aws_region_name: ${{ vars.AWS_REGION_NAME }} + aws_secret_id: ${{ secrets.AWS_SECRET_ID }} + artifactory_username: ${{ vars.ARTIFACTORY_USERNAME }} + - uses: mongodb-labs/drivers-github-tools/python/publish@v2 + with: + version: ${{ inputs.version }} + following_version: ${{ inputs.following_version }} + product_name: ${{ env.PRODUCT_NAME }} + silk_asset_group: ${{ env.SILK_ASSET_GROUP }} + evergreen_project: ${{ env.EVERGREEN_PROJECT }} + token: ${{ github.token }} + dry_run: ${{ inputs.dry_run }} diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 624cff1bf2..b93c93c022 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -2,7 +2,9 @@ name: Python Tests on: push: + branches: ["master", "v**"] pull_request: + workflow_dispatch: concurrency: group: tests-${{ github.ref }} @@ -14,13 +16,13 @@ defaults: jobs: - lint: + static: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v3 + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 with: - python-version: 3.8 + python-version: "3.8" cache: 'pip' cache-dependency-path: 'pyproject.toml' - name: Install Python dependencies @@ -29,7 +31,19 @@ jobs: - name: Run linters run: | tox -m lint-manual - tox -m manifest + - name: Run compilation + run: | + export PYMONGO_C_EXT_MUST_BUILD=1 + pip install -v -e . 
+ python tools/fail_if_no_c.py + - name: Run typecheck + run: | + tox -m typecheck + - run: | + sudo apt-get install -y cppcheck + - run: | + cppcheck --force bson + cppcheck pymongo build: # supercharge/mongodb-github-action requires containers so we don't test other platforms @@ -37,12 +51,12 @@ jobs: strategy: matrix: os: [ubuntu-20.04] - python-version: ["3.7", "3.11", "pypy-3.8"] + python-version: ["3.8", "3.11", "pypy-3.9"] name: CPython ${{ matrix.python-version }}-${{ matrix.os }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} cache: 'pip' @@ -51,7 +65,7 @@ jobs: run: | pip install -q tox - name: Start MongoDB - uses: supercharge/mongodb-github-action@1.7.0 + uses: supercharge/mongodb-github-action@1.10.0 with: mongodb-version: 4.4 - name: Run tests @@ -62,9 +76,9 @@ jobs: runs-on: ubuntu-latest name: DocTest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: "3.8" cache: 'pip' @@ -73,64 +87,86 @@ jobs: run: | pip install -q tox - name: Start MongoDB - uses: supercharge/mongodb-github-action@1.7.0 + uses: supercharge/mongodb-github-action@1.10.0 with: mongodb-version: 4.4 - name: Run tests run: | tox -m doc-test - typing: - name: Typing Tests + docs: + name: Docs Checks runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 with: - python-version: "3.11" cache: 'pip' cache-dependency-path: 'pyproject.toml' + # Build docs on lowest supported Python for furo + python-version: '3.8' - name: Install dependencies run: | pip install -q tox - - name: Run typecheck + - name: Build docs run: | - tox -m typecheck + tox -m doc - docs: - name: Docs Checks + linkcheck: + name: Link Check 
runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 with: cache: 'pip' cache-dependency-path: 'pyproject.toml' + # Build docs on lowest supported Python for furo + python-version: '3.8' - name: Install dependencies run: | pip install -q tox - - name: Check links + - name: Build docs run: | tox -m linkcheck - - name: Build docs + + typing: + name: Typing Tests + runs-on: ubuntu-latest + strategy: + matrix: + python: ["3.8", "3.11"] + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: "${{matrix.python}}" + cache: 'pip' + cache-dependency-path: 'pyproject.toml' + - name: Install dependencies run: | - tox -m doc + pip install -q tox + - name: Run typecheck + run: | + tox -m typecheck make_sdist: runs-on: ubuntu-latest name: "Make an sdist" steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 with: cache: 'pip' cache-dependency-path: 'pyproject.toml' + # Build sdist on lowest supported Python + python-version: '3.8' - name: Build SDist shell: bash run: | pip install build python -m build --sdist - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: "sdist" path: dist/*.tar.gz @@ -142,22 +178,28 @@ jobs: timeout-minutes: 20 steps: - name: Download sdist - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 - name: Unpack SDist shell: bash run: | cd sdist + ls mkdir test tar --strip-components=1 -zxf *.tar.gz -C ./test - - uses: actions/setup-python@v2 + ls test + - uses: actions/setup-python@v5 with: cache: 'pip' cache-dependency-path: 'sdist/test/pyproject.toml' + # Test sdist on lowest supported Python + python-version: '3.8' - name: Start MongoDB - uses: supercharge/mongodb-github-action@1.7.0 + uses: supercharge/mongodb-github-action@1.10.0 - name: Run Test shell: bash run: | cd 
sdist/test + ls + which python pip install -e ".[test]" pytest -v diff --git a/.gitignore b/.gitignore index 77483d26b2..69dd20efa3 100644 --- a/.gitignore +++ b/.gitignore @@ -9,15 +9,20 @@ build/ doc/_build/ dist/ tools/settings.py +drivers-evergreen-tools pymongo.egg-info/ *.so -*.egg +*.egg* .tox mongocryptd.pid .idea/ +.vscode/ .nova/ venv/ secrets-export.sh +libmongocrypt.tar.gz +libmongocrypt/ +libmongocrypt_git/ # Lambda temp files test/lambda/.aws-sam @@ -25,3 +30,7 @@ test/lambda/env.json test/lambda/mongodb/pymongo/* test/lambda/mongodb/gridfs/* test/lambda/mongodb/bson/* + +# test results and logs +xunit-results/ +server.log diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 00a03defcd..1a567b73f0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.1.0 + rev: v4.5.0 hooks: - id: check-added-large-files - id: check-case-conflict @@ -17,22 +17,16 @@ repos: exclude: .patch exclude_types: [json] -- repo: https://github.com/psf/black - rev: 22.3.0 - hooks: - - id: black - files: \.py$ - args: [--line-length=100] - - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. 
- rev: v0.1.0 + rev: v0.1.3 hooks: - id: ruff args: ["--fix", "--show-fixes"] + - id: ruff-format - repo: https://github.com/adamchainz/blacken-docs - rev: "1.13.0" + rev: "1.16.0" hooks: - id: blacken-docs additional_dependencies: @@ -55,7 +49,7 @@ repos: # We use the Python version instead of the original version which seems to require Docker # https://github.com/koalaman/shellcheck-precommit - repo: https://github.com/shellcheck-py/shellcheck-py - rev: v0.8.0.4 + rev: v0.9.0.6 hooks: - id: shellcheck name: shellcheck @@ -63,14 +57,14 @@ repos: stages: [manual] - repo: https://github.com/PyCQA/doc8 - rev: 0.11.1 + rev: v1.1.1 hooks: - id: doc8 args: ["--ignore=D001"] # ignore line length stages: [manual] - repo: https://github.com/sirosen/check-jsonschema - rev: 0.14.1 + rev: 0.27.0 hooks: - id: check-jsonschema name: "Check GitHub Workflows" @@ -80,15 +74,16 @@ repos: stages: [manual] - repo: https://github.com/ariebovenberg/slotscheck - rev: v0.14.0 + rev: v0.17.0 hooks: - id: slotscheck files: \.py$ exclude: "^(test|tools)/" stages: [manual] + args: ["--no-strict-imports"] - repo: https://github.com/codespell-project/codespell - rev: "v2.2.4" + rev: "v2.2.6" hooks: - id: codespell # Examples of errors or updates to justify the exceptions: @@ -98,4 +93,4 @@ repos: # - test/test_bson.py:267: isnt ==> isn't # - test/versioned-api/crud-api-version-1-strict.json:514: nin ==> inn, min, bin, nine # - test/test_client.py:188: te ==> the, be, we, to - args: ["-L", "fle,fo,infinit,isnt,nin,te"] + args: ["-L", "fle,fo,infinit,isnt,nin,te,aks"] diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 39c86fff03..a3693074f6 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -16,7 +16,7 @@ python: # Install pymongo itself. - method: pip path: . 
- - requirements: doc/docs-requirements.txt + - requirements: requirements/docs.txt build: os: ubuntu-22.04 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000..bcfc553748 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,242 @@ +# Contributing to PyMongo + +PyMongo has a large +[community](https://pymongo.readthedocs.io/en/stable/contributors.html) +and contributions are always encouraged. Contributions can be as simple +as minor tweaks to the documentation. Please read these guidelines +before sending a pull request. + +## Bugfixes and New Features + +Before starting to write code, look for existing +[tickets](https://jira.mongodb.org/browse/PYTHON) or [create +one](https://jira.mongodb.org/browse/PYTHON) for your specific issue or +feature request. That way you avoid working on something that might not +be of interest or that has already been addressed. + +## Supported Interpreters + +PyMongo supports CPython 3.8+ and PyPy3.9+. Language features not +supported by all interpreters can not be used. + +## Style Guide + +PyMongo follows [PEP8](http://www.python.org/dev/peps/pep-0008/) +including 4 space indents and 79 character line limits. + +## General Guidelines + +- Avoid backward breaking changes if at all possible. +- Write inline documentation for new classes and methods. +- Write tests and make sure they pass (make sure you have a mongod + running on the default port, then execute `tox -e test` from the cmd + line to run the test suite). +- Add yourself to doc/contributors.rst `:)` + +## Authoring a Pull Request + +**Our Pull Request Policy is based on this** [Code Review Developer +Guide](https://google.github.io/eng-practices/review) + +The expectation for any code author is to provide all the context needed +in the space of a pull request for any engineer to feel equipped to +review the code. 
Depending on the type of change, do your best to +highlight important new functions or objects you've introduced in the +code; think complex functions or new abstractions. Whilst it may seem +like more work for you to adjust your pull request, the reality is your +likelihood for getting review sooner shoots up. + +**Self Review Guidelines to follow** + +- If the PR is too large, split it if possible. + + - Use 250 LoC (excluding test data and config changes) as a + rule-of-thumb. + + - Moving and changing code should be in separate PRs or commits. + + - Moving: Taking large code blobs and transplanting + them to another file. There\'s generally no (or very + little) actual code changed other than a cut and + paste. It can even be extended to large deletions. + - Changing: Adding code changes (be that refactors or + functionality additions/subtractions). + - These two, when mixed, can muddy understanding and + sometimes make it harder for reviewers to keep track + of things. + +- Prefer explaining with code comments instead of PR comments. + +**Provide background** + +- The PR description and linked tickets should answer the "what" and + "why" of the change. The code change explains the "how". + +**Follow the Template** + +- Please do not deviate from the template we make; it is there for a + lot of reasons. If it is a one line fix, we still need to have + context on what and why it is needed. + +- If making a versioning change, please let that be known. See examples below: + + - `versionadded:: 3.11` + - `versionchanged:: 3.5` + +**Pull Request Template Breakdown** + +- **Github PR Title** + + - The PR Title format should always be + `[JIRA-ID] : Jira Title or Blurb Summary`. + +- **JIRA LINK** + +- Convenient link to the associated JIRA ticket. + +- **Summary** + + - Small blurb on why this is needed. 
The JIRA task should have + the more in-depth description, but this should still, at a + high level, give anyone looking an understanding of why the + PR has been checked in. + +- **Changes in this PR** + + - The explicit code changes that this PR is introducing. This + should be more specific than just the task name. (Unless the + task name is very clear). + +- **Test Plan** + + - Everything needs a test description. Describe what you did + to validate your changes actually worked; if you did + nothing, then document you did not test it. Aim to make + these steps reproducible by other engineers, specifically + with your primary reviewer in mind. + +- **Screenshots** + + - Any images that provide more context to the PR. Usually, + these just coincide with the test plan. + +- **Callouts or follow-up items** + + - This is a good place for identifying "to-dos" that you've + placed in the code (Must have an accompanying JIRA Ticket). + - Potential bugs that you are unsure how to test in the code. + - Opinions you want to receive about your code. + +## Running Linters + +PyMongo uses [pre-commit](https://pypi.org/project/pre-commit/) for +managing linting of the codebase. `pre-commit` performs various checks +on all files in PyMongo and uses tools that help follow a consistent +code style within the codebase. + +To set up `pre-commit` locally, run: + +```bash +brew install pre-commit +pre-commit install +``` + +To run `pre-commit` manually, run: + +```bash +pre-commit run --all-files +``` + +To run a manual hook like `mypy` manually, run: + +```bash +pre-commit run --all-files --hook-stage manual mypy +``` + +Typically we use `tox` to run the linters, e.g. 
+
+```bash
+tox -e typecheck-mypy
+tox -e lint-manual
+```
+
+## Documentation
+
+To contribute to the [API
+documentation](https://pymongo.readthedocs.io/en/stable/) just make your
+changes to the inline documentation of the appropriate [source
+code](https://github.com/mongodb/mongo-python-driver) or [rst
+file](https://github.com/mongodb/mongo-python-driver/tree/master/doc) in
+a branch and submit a [pull
+request](https://help.github.com/articles/using-pull-requests). You
+might also use the GitHub
+[Edit](https://github.com/blog/844-forking-with-the-edit-button) button.
+
+We use [reStructuredText](https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html) for all
+documentation including narrative docs, and the [Sphinx docstring format](https://sphinx-rtd-tutorial.readthedocs.io/en/latest/docstrings.html).
+
+You can build the documentation locally by running:
+
+```bash
+tox -e doc
+```
+
+When updating docs, it can be helpful to run the live docs server as:
+
+```bash
+tox -e doc-serve
+```
+
+Browse to the link provided, and then as you make changes to docstrings or narrative docs,
+the pages will re-render and the browser will automatically refresh.
+
+
+## Running Tests Locally
+
+- Ensure you have started the appropriate Mongo Server(s).
+- Run `pip install tox` to use `tox` for testing or run
+  `pip install -e ".[test]"` to run `pytest` directly.
+- Run `tox -m test` or `pytest` to run all of the tests.
+- Append `test/<mod_name>.py::<class_name>::<test_name>` to run
+  specific tests. You can omit the `<test_name>` to test a full class
+  and the `<class_name>` to test a full module. For example:
+  `tox -m test -- test/test_change_stream.py::TestUnifiedChangeStreamsErrors::test_change_stream_errors_on_ElectionInProgress`.
+- Use the `-k` argument to select tests by pattern.
+
+## Running Load Balancer Tests Locally
+
+- Install `haproxy` (available as `brew install haproxy` on macOS).
+- Clone `drivers-evergreen-tools`:
+  `git clone git@github.com:mongodb-labs/drivers-evergreen-tools.git`.
+
+- Start the servers using
+  `LOAD_BALANCER=true TOPOLOGY=sharded_cluster AUTH=noauth SSL=nossl MONGODB_VERSION=6.0 DRIVERS_TOOLS=$PWD/drivers-evergreen-tools MONGO_ORCHESTRATION_HOME=$PWD/drivers-evergreen-tools/.evergreen/orchestration $PWD/drivers-evergreen-tools/.evergreen/run-orchestration.sh`.
+- Start the load balancer using:
+  `MONGODB_URI='mongodb://localhost:27017,localhost:27018/' $PWD/drivers-evergreen-tools/.evergreen/run-load-balancer.sh start`.
+- Run the tests from the `pymongo` checkout directory using:
+  `TEST_LOADBALANCER=1 tox -m test-eg`.
+
+## Running Encryption Tests Locally
+- Run `AWS_PROFILE=<profile> tox -m setup-encryption` after setting up your AWS profile with `aws configure sso`.
+- Run the tests with `TEST_ENCRYPTION=1 tox -e test-eg`.
+- When done, run `tox -m teardown-encryption` to clean up.
+
+## Re-sync Spec Tests
+
+If you would like to re-sync the copy of the specification tests in the
+PyMongo repository with that which is inside the [specifications
+repo](https://github.com/mongodb/specifications), please use the script
+provided in `.evergreen/resync-specs.sh`.:
+
+```bash
+git clone git@github.com:mongodb/specifications.git
+export MDB_SPECS=~/specifications
+cd ~/mongo-python-driver/.evergreen
+./resync-specs.sh -b "<regex>" spec1 spec2 ...
+./resync-specs.sh -b "connection-string*" crud bson-corpus # Updates crud and bson-corpus specs while ignoring all files with the regex "connection-string*"
+cd ..
+```
+
+The `-b` flag adds as a regex pattern to block files you do not wish to
+update in PyMongo. This is primarily helpful if you are implementing a
+new feature in PyMongo that has spec tests already implemented, or if
+you are attempting to validate new spec tests in PyMongo.
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index 07d6f1d77c..0000000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,171 +0,0 @@ -Contributing to PyMongo -======================= - -PyMongo has a large `community -`_ and -contributions are always encouraged. Contributions can be as simple as -minor tweaks to the documentation. Please read these guidelines before -sending a pull request. - -Bugfixes and New Features -------------------------- - -Before starting to write code, look for existing `tickets -`_ or `create one -`_ for your specific -issue or feature request. That way you avoid working on something -that might not be of interest or that has already been addressed. - -Supported Interpreters ----------------------- - -PyMongo supports CPython 3.7+ and PyPy3.8+. Language -features not supported by all interpreters can not be used. - -Style Guide ------------ - -PyMongo follows `PEP8 `_ -including 4 space indents and 79 character line limits. - -General Guidelines ------------------- - -- Avoid backward breaking changes if at all possible. -- Write inline documentation for new classes and methods. -- Write tests and make sure they pass (make sure you have a mongod - running on the default port, then execute ``tox -m test`` - from the cmd line to run the test suite). -- Add yourself to doc/contributors.rst :) - -Authoring a Pull Request ------------------------- - -**Our Pull Request Policy is based on this** `Code Review Developer Guide `_ - -The expectation for any code author is to provide all the context needed in the space of a -pull request for any engineer to feel equipped to review the code. Depending on the type of -change, do your best to highlight important new functions or objects you’ve introduced in the -code; think complex functions or new abstractions. Whilst it may seem like more work for you to -adjust your pull request, the reality is your likelihood for getting review sooner shoots -up. 
- -**Self Review Guidelines to follow** - -- If the PR is too large, split it if possible. - - Use 250 LoC (excluding test data and config changes) as a rule-of-thumb. - - Moving and changing code should be in separate PRs or commits. - - Moving: Taking large code blobs and transplanting them to another file. There's generally no (or very little) actual code changed other than a cut and paste. It can even be extended to large deletions. - - Changing: Adding code changes (be that refactors or functionality additions/subtractions). - - These two, when mixed, can muddy understanding and sometimes make it harder for reviewers to keep track of things. -- Prefer explaining with code comments instead of PR comments. - -**Provide background** - -- The PR description and linked tickets should answer the "what" and "why" of the change. The code change explains the "how". - -**Follow the Template** - -- Please do not deviate from the template we make; it is there for a lot of reasons. If it is a one line fix, we still need to have context on what and why it is needed. -- If making a versioning change, please let that be known. See examples below: - - ``versionadded:: 3.11`` - - ``versionchanged:: 3.5`` - - -**Pull Request Template Breakdown** - -- **Github PR Title** - - The PR Title format should always be ``[JIRA-ID] : Jira Title or Blurb Summary``. - -- **JIRA LINK** - - Convenient link to the associated JIRA ticket. - -- **Summary** - - Small blurb on why this is needed. The JIRA task should have the more in-depth description, but this should still, at a high level, give anyone looking an understanding of why the PR has been checked in. - -- **Changes in this PR** - - The explicit code changes that this PR is introducing. This should be more specific than just the task name. (Unless the task name is very clear). - -- **Test Plan** - - Everything needs a test description. 
Describe what you did to validate your changes actually worked; if you did nothing, then document you did not test it. Aim to make these steps reproducible by other engineers, specifically with your primary reviewer in mind. - -- **Screenshots** - - Any images that provide more context to the PR. Usually, these just coincide with the test plan. - -- **Callouts or follow-up items** - - This is a good place for identifying “to-dos” that you’ve placed in the code (Must have an accompanying JIRA Ticket). - - Potential bugs that you are unsure how to test in the code. - - Opinions you want to receive about your code. - - -Running Linters ---------------- - -PyMongo uses `pre-commit `_ -for managing linting of the codebase. -``pre-commit`` performs various checks on all files in PyMongo and uses tools -that help follow a consistent code style within the codebase. - -To set up ``pre-commit`` locally, run:: - - pip install pre-commit - pre-commit install - -To run ``pre-commit`` manually, run:: - - pre-commit run --all-files - -To run a manual hook like ``flake8`` manually, run:: - - pre-commit run --all-files --hook-stage manual flake8 - -Documentation -------------- - -To contribute to the `API documentation `_ -just make your changes to the inline documentation of the appropriate -`source code `_ or `rst file -`_ in a -branch and submit a `pull request `_. -You might also use the GitHub `Edit `_ -button. - -Running Tests Locally ---------------------- -- Ensure you have started the appropriate Mongo Server(s). -- Run ``pip install tox`` to use ``tox`` for testing or run ``pip install -e ".[test]"`` to run ``pytest`` directly. -- Run ``tox -m test`` or ``pytest`` to run all of the tests. -- Append ``test/.py::::`` to - run specific tests. You can omit the ```` to test a full class - and the ```` to test a full module. For example: - ``tox -m test test/test_change_stream.py::TestUnifiedChangeStreamsErrors::test_change_stream_errors_on_ElectionInProgress``. 
-- Use the ``-k`` argument to select tests by pattern. - -Running Load Balancer Tests Locally ------------------------------------ -- Install ``haproxy`` (available as ``brew install haproxy`` on macOS). -- Clone ``drivers-evergreen-tools``: ``git clone git@github.com:mongodb-labs/drivers-evergreen-tools.git``. -- Start the servers using ``LOAD_BALANCER=true TOPOLOGY=sharded_cluster AUTH=noauth SSL=nossl MONGODB_VERSION=6.0 DRIVERS_TOOLS=$PWD/drivers-evergreen-tools MONGO_ORCHESTRATION_HOME=$PWD/drivers-evergreen-tools/.evergreen/orchestration $PWD/drivers-evergreen-tools/.evergreen/run-orchestration.sh``. -- Start the load balancer using: ``MONGODB_URI='mongodb://localhost:27017,localhost:27018/' $PWD/drivers-evergreen-tools/.evergreen/run-load-balancer.sh start``. -- Run the tests from the ``pymongo`` checkout directory using: ``TEST_LOADBALANCER=1 tox -m test-eg``. - -Re-sync Spec Tests ------------------- - -If you would like to re-sync the copy of the specification tests in the -PyMongo repository with that which is inside the `specifications repo -`_, please -use the script provided in ``.evergreen/resync-specs.sh``.:: - - git clone git@github.com:mongodb/specifications.git - export MDB_SPECS=~/specifications - cd ~/mongo-python-driver/.evergreen - ./resync-specs.sh -b "" spec1 spec2 ... - ./resync-specs.sh -b "connection-string*" crud bson-corpus # Updates crud and bson-corpus specs while ignoring all files with the regex "connection-string*" - cd .. - -The ``-b`` flag adds as a regex pattern to block files you do not wish to -update in PyMongo. -This is primarily helpful if you are implementing a new feature in PyMongo -that has spec tests already implemented, or if you are attempting to -validate new spec tests in PyMongo. 
diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 444da54d57..0000000000 --- a/MANIFEST.in +++ /dev/null @@ -1,32 +0,0 @@ -include README.rst -include LICENSE -include THIRD-PARTY-NOTICES -include *.ini -exclude .coveragerc -exclude .flake8 -exclude .git-blame-ignore-revs -exclude .pre-commit-config.yaml -exclude .readthedocs.yaml -exclude CONTRIBUTING.rst -exclude RELEASE.rst -recursive-include doc *.rst -recursive-include doc *.py -recursive-include doc *.conf -recursive-include doc *.css -recursive-include doc *.js -recursive-include doc *.png -include doc/Makefile -include doc/_templates/layout.html -include doc/docs-requirements.txt -include doc/make.bat -include doc/static/periodic-executor-refs.dot -recursive-include tools *.py -include tools/README.rst -include green_framework_test.py -recursive-include test *.pem -recursive-include test *.py -recursive-include test *.json -recursive-include bson *.h -prune test/mod_wsgi_test -prune test/lambda -prune .evergreen diff --git a/README.md b/README.md new file mode 100644 index 0000000000..3d13f1aa9a --- /dev/null +++ b/README.md @@ -0,0 +1,221 @@ +# PyMongo + +[![PyPI Version](https://img.shields.io/pypi/v/pymongo)](https://pypi.org/project/pymongo) +[![Python Versions](https://img.shields.io/pypi/pyversions/pymongo)](https://pypi.org/project/pymongo) +[![Monthly Downloads](https://static.pepy.tech/badge/pymongo/month)](https://pepy.tech/project/pymongo) +[![Documentation Status](https://readthedocs.org/projects/pymongo/badge/?version=stable)](http://pymongo.readthedocs.io/en/stable/?badge=stable) + +## About + +The PyMongo distribution contains tools for interacting with MongoDB +database from Python. The `bson` package is an implementation of the +[BSON format](http://bsonspec.org) for Python. The `pymongo` package is +a native Python driver for MongoDB. 
The `gridfs` package is a +[gridfs](https://github.com/mongodb/specifications/blob/master/source/gridfs/gridfs-spec.rst/) +implementation on top of `pymongo`. + +PyMongo supports MongoDB 3.6, 4.0, 4.2, 4.4, 5.0, 6.0, and 7.0. + +## Support / Feedback + +For issues with, questions about, or feedback for PyMongo, please look +into our [support channels](https://support.mongodb.com/welcome). Please +do not email any of the PyMongo developers directly with issues or +questions - you're more likely to get an answer on +[StackOverflow](https://stackoverflow.com/questions/tagged/mongodb) +(using a "mongodb" tag). + +## Bugs / Feature Requests + +Think you've found a bug? Want to see a new feature in PyMongo? Please +open a case in our issue management tool, JIRA: + +- [Create an account and login](https://jira.mongodb.org). +- Navigate to [the PYTHON + project](https://jira.mongodb.org/browse/PYTHON). +- Click **Create Issue** - Please provide as much information as + possible about the issue type and how to reproduce it. + +Bug reports in JIRA for all driver projects (i.e. PYTHON, CSHARP, JAVA) +and the Core Server (i.e. SERVER) project are **public**. + +### How To Ask For Help + +Please include all of the following information when opening an issue: + +- Detailed steps to reproduce the problem, including full traceback, + if possible. + +- The exact python version used, with patch level: + +```bash +python -c "import sys; print(sys.version)" +``` + +- The exact version of PyMongo used, with patch level: + +```bash +python -c "import pymongo; print(pymongo.version); print(pymongo.has_c())" +``` + +- The operating system and version (e.g. Windows 7, OSX 10.8, ...) + +- Web framework or asynchronous network library used, if any, with + version (e.g. Django 1.7, mod_wsgi 4.3.0, gevent 1.0.1, Tornado + 4.0.2, ...) 
+ +### Security Vulnerabilities + +If you've identified a security vulnerability in a driver or any other +MongoDB project, please report it according to the [instructions +here](https://www.mongodb.com/docs/manual/tutorial/create-a-vulnerability-report/). + +## Installation + +PyMongo can be installed with [pip](http://pypi.python.org/pypi/pip): + +```bash +python -m pip install pymongo +``` + +You can also download the project source and do: + +```bash +pip install . +``` + +Do **not** install the "bson" package from pypi. PyMongo comes with +its own bson package; running "pip install bson" installs a third-party +package that is incompatible with PyMongo. + +## Dependencies + +PyMongo supports CPython 3.8+ and PyPy3.9+. + +Required dependencies: + +Support for `mongodb+srv://` URIs requires [dnspython](https://pypi.python.org/pypi/dnspython) + +Optional dependencies: + +GSSAPI authentication requires +[pykerberos](https://pypi.python.org/pypi/pykerberos) on Unix or +[WinKerberos](https://pypi.python.org/pypi/winkerberos) on Windows. 
The +correct dependency can be installed automatically along with PyMongo: + +```bash +python -m pip install "pymongo[gssapi]" +``` + +MONGODB-AWS authentication requires +[pymongo-auth-aws](https://pypi.org/project/pymongo-auth-aws/): + +```bash +python -m pip install "pymongo[aws]" +``` + +OCSP (Online Certificate Status Protocol) requires +[PyOpenSSL](https://pypi.org/project/pyOpenSSL/), +[requests](https://pypi.org/project/requests/), +[service_identity](https://pypi.org/project/service_identity/) and may +require [certifi](https://pypi.python.org/pypi/certifi): + +```bash +python -m pip install "pymongo[ocsp]" +``` + +Wire protocol compression with snappy requires +[python-snappy](https://pypi.org/project/python-snappy): + +```bash +python -m pip install "pymongo[snappy]" +``` + +Wire protocol compression with zstandard requires +[zstandard](https://pypi.org/project/zstandard): + +```bash +python -m pip install "pymongo[zstd]" +``` + +Client-Side Field Level Encryption requires +[pymongocrypt](https://pypi.org/project/pymongocrypt/) and +[pymongo-auth-aws](https://pypi.org/project/pymongo-auth-aws/): + +```bash +python -m pip install "pymongo[encryption]" +``` +You can install all dependencies automatically with the following +command: + +```bash +python -m pip install "pymongo[gssapi,aws,ocsp,snappy,zstd,encryption]" +``` + +Additional dependencies are: + +- (to generate documentation or run tests) + [tox](https://tox.wiki/en/latest/index.html) + +## Examples + +Here's a basic example (for more see the *examples* section of the +docs): + +```pycon +>>> import pymongo +>>> client = pymongo.MongoClient("localhost", 27017) +>>> db = client.test +>>> db.name +'test' +>>> db.my_collection +Collection(Database(MongoClient('localhost', 27017), 'test'), 'my_collection') +>>> db.my_collection.insert_one({"x": 10}).inserted_id +ObjectId('4aba15ebe23f6b53b0000000') +>>> db.my_collection.insert_one({"x": 8}).inserted_id +ObjectId('4aba160ee23f6b543e000000') +>>> 
db.my_collection.insert_one({"x": 11}).inserted_id +ObjectId('4aba160ee23f6b543e000002') +>>> db.my_collection.find_one() +{'x': 10, '_id': ObjectId('4aba15ebe23f6b53b0000000')} +>>> for item in db.my_collection.find(): +... print(item["x"]) +... +10 +8 +11 +>>> db.my_collection.create_index("x") +'x_1' +>>> for item in db.my_collection.find().sort("x", pymongo.ASCENDING): +... print(item["x"]) +... +8 +10 +11 +>>> [item["x"] for item in db.my_collection.find().limit(2).skip(1)] +[8, 11] +``` + +## Documentation + +Documentation is available at +[pymongo.readthedocs.io](https://pymongo.readthedocs.io/en/stable/). + +Documentation can be generated by running **tox -m doc**. Generated +documentation can be found in the `doc/build/html/` directory. + +## Learning Resources + +- MongoDB Learn - [Python +courses](https://learn.mongodb.com/catalog?labels=%5B%22Language%22%5D&values=%5B%22Python%22%5D). +- [Python Articles on Developer +Center](https://www.mongodb.com/developer/languages/python/). + +## Testing + +The easiest way to run the tests is to run **tox -m test** in the root +of the distribution. For example, + +```bash +tox -e test +``` diff --git a/README.rst b/README.rst deleted file mode 100644 index 3172ecb8aa..0000000000 --- a/README.rst +++ /dev/null @@ -1,212 +0,0 @@ -======= -PyMongo -======= -:Info: See `the mongo site `_ for more information. See `GitHub `_ for the latest source. -:Documentation: Available at `pymongo.readthedocs.io `_ -:Author: The MongoDB Python Team - -About -===== - -The PyMongo distribution contains tools for interacting with MongoDB -database from Python. The ``bson`` package is an implementation of -the `BSON format `_ for Python. The ``pymongo`` -package is a native Python driver for MongoDB. The ``gridfs`` package -is a `gridfs -`_ -implementation on top of ``pymongo``. - -PyMongo supports MongoDB 3.6, 4.0, 4.2, 4.4, 5.0, 6.0, and 7.0. 
- -Support / Feedback -================== - -For issues with, questions about, or feedback for PyMongo, please look into -our `support channels `_. Please -do not email any of the PyMongo developers directly with issues or -questions - you're more likely to get an answer on `StackOverflow `_ -(using a "mongodb" tag). - -Bugs / Feature Requests -======================= - -Think you’ve found a bug? Want to see a new feature in PyMongo? Please open a -case in our issue management tool, JIRA: - -- `Create an account and login `_. -- Navigate to `the PYTHON project `_. -- Click **Create Issue** - Please provide as much information as possible about the issue type and how to reproduce it. - -Bug reports in JIRA for all driver projects (i.e. PYTHON, CSHARP, JAVA) and the -Core Server (i.e. SERVER) project are **public**. - -How To Ask For Help -------------------- - -Please include all of the following information when opening an issue: - -- Detailed steps to reproduce the problem, including full traceback, if possible. -- The exact python version used, with patch level:: - - $ python -c "import sys; print(sys.version)" - -- The exact version of PyMongo used, with patch level:: - - $ python -c "import pymongo; print(pymongo.version); print(pymongo.has_c())" - -- The operating system and version (e.g. Windows 7, OSX 10.8, ...) -- Web framework or asynchronous network library used, if any, with version (e.g. - Django 1.7, mod_wsgi 4.3.0, gevent 1.0.1, Tornado 4.0.2, ...) - -Security Vulnerabilities ------------------------- - -If you’ve identified a security vulnerability in a driver or any other -MongoDB project, please report it according to the `instructions here -`_. - -Installation -============ - -PyMongo can be installed with `pip `_:: - - $ python -m pip install pymongo - -Or ``easy_install`` from -`setuptools `_:: - - $ python -m easy_install pymongo - -You can also download the project source and do:: - - $ pip install . 
- -Do **not** install the "bson" package from pypi. PyMongo comes with its own -bson package; doing "easy_install bson" installs a third-party package that -is incompatible with PyMongo. - -Dependencies -============ - -PyMongo supports CPython 3.7+ and PyPy3.7+. - -Required dependencies: - -Support for mongodb+srv:// URIs requires `dnspython -`_ - -Optional dependencies: - -GSSAPI authentication requires `pykerberos -`_ on Unix or `WinKerberos -`_ on Windows. The correct -dependency can be installed automatically along with PyMongo:: - - $ python -m pip install "pymongo[gssapi]" - -MONGODB-AWS authentication requires `pymongo-auth-aws -`_:: - - $ python -m pip install "pymongo[aws]" - -OCSP (Online Certificate Status Protocol) requires `PyOpenSSL -`_, `requests -`_, `service_identity -`_ and may -require `certifi -`_:: - - $ python -m pip install "pymongo[ocsp]" - -Wire protocol compression with snappy requires `python-snappy -`_:: - - $ python -m pip install "pymongo[snappy]" - -Wire protocol compression with zstandard requires `zstandard -`_:: - - $ python -m pip install "pymongo[zstd]" - -Client-Side Field Level Encryption requires `pymongocrypt -`_ and -`pymongo-auth-aws `_:: - - $ python -m pip install "pymongo[encryption]" - -You can install all dependencies automatically with the following -command:: - - $ python -m pip install "pymongo[gssapi,aws,ocsp,snappy,zstd,encryption]" - -Additional dependencies are: - -- (to generate documentation or run tests) tox_ - -Examples -======== -Here's a basic example (for more see the *examples* section of the docs): - -.. 
code-block:: pycon - - >>> import pymongo - >>> client = pymongo.MongoClient("localhost", 27017) - >>> db = client.test - >>> db.name - 'test' - >>> db.my_collection - Collection(Database(MongoClient('localhost', 27017), 'test'), 'my_collection') - >>> db.my_collection.insert_one({"x": 10}).inserted_id - ObjectId('4aba15ebe23f6b53b0000000') - >>> db.my_collection.insert_one({"x": 8}).inserted_id - ObjectId('4aba160ee23f6b543e000000') - >>> db.my_collection.insert_one({"x": 11}).inserted_id - ObjectId('4aba160ee23f6b543e000002') - >>> db.my_collection.find_one() - {'x': 10, '_id': ObjectId('4aba15ebe23f6b53b0000000')} - >>> for item in db.my_collection.find(): - ... print(item["x"]) - ... - 10 - 8 - 11 - >>> db.my_collection.create_index("x") - 'x_1' - >>> for item in db.my_collection.find().sort("x", pymongo.ASCENDING): - ... print(item["x"]) - ... - 8 - 10 - 11 - >>> [item["x"] for item in db.my_collection.find().limit(2).skip(1)] - [8, 11] - -Documentation -============= - -Documentation is available at `pymongo.readthedocs.io `_. - -Documentation can be generated by running **tox -m doc**. Generated documentation can be found in the -*doc/build/html/* directory. - -Learning Resources -================== - -MongoDB Learn - `Python courses `_. -`Python Articles on Developer Center `_. - -Testing -======= - -The easiest way to run the tests is to run **tox -m test** in -the root of the distribution. - -To verify that PyMongo works with Gevent's monkey-patching:: - - $ python green_framework_test.py gevent - -Or with Eventlet's:: - - $ python green_framework_test.py eventlet - -.. _tox: https://tox.wiki/en/latest/index.html diff --git a/RELEASE.md b/RELEASE.md new file mode 100644 index 0000000000..3c2990df08 --- /dev/null +++ b/RELEASE.md @@ -0,0 +1,109 @@ +# Some notes on PyMongo releases + +## Versioning + +We follow [semver](https://semver.org/) and [pep-0440](https://www.python.org/dev/peps/pep-0440) +for versioning. 
+
+We shoot for a release every few months - that will generally just
+increment the middle / minor version number (e.g. `3.5.0` -> `3.6.0`).
+
+Patch releases are reserved for bug fixes (in general no new features or
+deprecations) - they only happen in cases where there is a critical bug
+in a recently released version, or when a release has no new features or
+API changes.
+
+In between releases we add `.devN` to the version number to denote the
+version under development. So if we just released `3.6.0`, then the
+current dev version might be `3.6.1.dev0` or `3.7.0.dev0`. When we make the
+next release we replace all instances of `3.x.x.devN` in the docs with the
+new version number.
+
+## Deprecation
+
+Changes should be backwards compatible unless absolutely necessary. When
+making API changes the approach is generally to add a deprecation
+warning but keeping the existing API functional. Deprecated features can
+be removed in a release that changes the major version number.
+
+## Doing a Release
+
+1. PyMongo is tested on Evergreen. Ensure the latest commits are passing
+   [CI](https://spruce.mongodb.com/commits/mongo-python-driver) as expected.
+
+2. Check Jira to ensure all the tickets in this version have been
+   completed.
+
+3. Make a PR that adds the release notes to `doc/changelog.rst`. Generally just
+   summarize/clarify the git log, but you might add some more long form
+   notes for big changes.
+
+4. Merge the PR.
+
+5. Clone the source repository in a temporary directory and check out the
+   release branch.
+
+6. Update the version number in `pymongo/_version.py`.
+
+7. Commit the change, e.g. `git add . && git commit -m "BUMP "`
+
+8. Tag w/ version_number, eg,
+   `git tag -a '4.1.0' -m 'BUMP 4.1.0'`.
+
+9. Bump the version number to `.dev0` in
+   `pymongo/_version.py`, commit, push.
+
+10. Push commit / tag, eg `git push && git push --tags`.
+
+11. Pushing a tag will trigger the release process on GitHub Actions
+    that will require a member of the team to authorize the deployment.
+    Navigate to https://github.com/mongodb/mongo-python-driver/actions/workflows/release-python.yml
+    and wait for the publish to complete.
+
+12. Make sure the new version appears on
+    `https://pymongo.readthedocs.io/en/stable/`. If the new version does not show
+    up automatically, trigger a rebuild of "stable" on https://readthedocs.org/projects/pymongo/builds/.
+
+13. Publish the release version in Jira and add a description of the release, such as the reason
+    or the main feature.
+
+14. Announce the release on the [community forum](https://www.mongodb.com/community/forums/tags/c/announcements/driver-releases/110/python)
+
+15. File a ticket for DOCSP highlighting changes in server version and
+    Python version compatibility or the lack thereof, for example https://jira.mongodb.org/browse/DOCSP-34040
+
+16. Create a GitHub Release for the tag using https://github.com/mongodb/mongo-python-driver/releases/new.
+    The title should be "PyMongo X.Y.Z", and the description should
+    contain a link to the release notes on the community forum, e.g.
+    "Release notes: mongodb.com/community/forums/t/pymongo-4-0-2-released/150457"
+
+17. Wait for automated update PR on conda-forge, e.g.: https://github.com/conda-forge/pymongo-feedstock/pull/81
+    Update dependencies if needed.
+
+
+## Doing a Bug Fix Release
+
+1. If it is a new branch, first create the release branch and Evergreen project.
+
+- Clone the source repository in a temporary location.
+
+- Create a branch from the tag, e.g. `git checkout -b v4.1 4.1.0`.
+
+- Push the branch, e.g.: `git push origin v4.6`.
+
+- Create a new project in Evergreen for the branch by duplicating the "Mongo Python Driver" project.
+  Select the option to create a JIRA ticket for S3 bucket permissions.
+
+- Update the "Display Name", "Branch Name", and "Identifier".
+ +- Attach the project to the repository. + +- Wait for the JIRA ticket to be resolved and verify S3 upload capability with a patch release on the + new project. + +2. Create a PR against the release branch. + +3. Create a release using the "Doing a Release" checklist above, ensuring that you + check out the appropriate release branch in the source checkout. + +4. Cherry-pick the changelog PR onto the `master` branch. diff --git a/RELEASE.rst b/RELEASE.rst deleted file mode 100644 index 55e39baf5a..0000000000 --- a/RELEASE.rst +++ /dev/null @@ -1,90 +0,0 @@ -Some notes on PyMongo releases -============================== - -Versioning ----------- - -We shoot for a release every few months - that will generally just -increment the middle / minor version number (e.g. 3.5.0 -> 3.6.0). - -Patch releases are reserved for bug fixes (in general no new features -or deprecations) - they only happen in cases where there is a critical -bug in a recently released version, or when a release has no new -features or API changes. - -In between releases we add .devN to the version number to denote the version -under development. So if we just released 3.6.0, then the current dev -version might be 3.6.1.dev0 or 3.7.0.dev0. When we make the next release we -replace all instances of 3.x.x.devN in the docs with the new version number. - -https://semver.org/ -https://www.python.org/dev/peps/pep-0440/ - -Deprecation ------------ - -Changes should be backwards compatible unless absolutely necessary. When making -API changes the approach is generally to add a deprecation warning but keeping -the existing API functional. Deprecated features can be removed in a release -that changes the major version number. - -Doing a Release ---------------- - -1. PyMongo is tested on Evergreen. Ensure the latest commit are passing CI - as expected: https://evergreen.mongodb.com/waterfall/mongo-python-driver. - -2. Check Jira to ensure all the tickets in this version have been completed. - -3. 
Add release notes to doc/changelog.rst. Generally just summarize/clarify - the git log, but you might add some more long form notes for big changes. - -4. Make sure version number is updated in ``pymongo/_version.py`` - -5. Commit with a BUMP version_number message, eg ``git commit -m 'BUMP 3.11.0'``. - -6. Tag w/ version_number, eg, ``git tag -a '3.11.0' -m 'BUMP 3.11.0' ``. - -7. Push commit / tag, eg ``git push && git push --tags``. - -8. Pushing a tag will trigger a release process in Evergreen which builds - wheels for manylinux, macOS, and Windows. Wait for the "release-combine" - task to complete and then download the "Release files all" archive. See: - https://evergreen.mongodb.com/waterfall/mongo-python-driver?bv_filter=release - - The contents should look like this:: - - $ ls path/to/archive - pymongo--cp310-cp310-macosx_10_9_universal2.whl - ... - pymongo--cp38-cp38-manylinux2014_x86_64.whl - ... - pymongo--cp38-cp38-win_amd64.whl - ... - pymongo-.tar.gz - -9. Upload all the release packages to PyPI with twine:: - - $ python3 -m twine upload path/to/archive/* - -10. Make sure the new version appears on https://pymongo.readthedocs.io/. If the - new version does not show up automatically, trigger a rebuild of "latest": - https://readthedocs.org/projects/pymongo/builds/ - -11. Bump the version number to .dev0 in ``pymongo/_version.py``, - commit, push. - -12. Publish the release version in Jira. - -13. Announce the release on: - https://www.mongodb.com/community/forums/c/announcements/driver-releases/110 - -14. File a ticket for DOCSP highlighting changes in server version and Python - version compatibility or the lack thereof, for example: - https://jira.mongodb.org/browse/DOCSP-13536 - -15. Create a GitHub Release for the tag using - https://github.com/mongodb/mongo-python-driver/releases/new. - The title should be "PyMongo X.Y.Z", and the description should contain - a link to the release notes on the the community forum, e.g. 
- "Release notes: mongodb.com/community/forums/t/pymongo-4-0-2-released/150457." diff --git a/_setup.py b/_setup.py new file mode 100644 index 0000000000..65ae1908fe --- /dev/null +++ b/_setup.py @@ -0,0 +1,143 @@ +from __future__ import annotations + +import os +import sys +import warnings + +# Hack to silence atexit traceback in some Python versions +try: + import multiprocessing # noqa: F401 +except ImportError: + pass + +from setuptools import setup +from setuptools.command.build_ext import build_ext +from setuptools.extension import Extension + + +class custom_build_ext(build_ext): + """Allow C extension building to fail. + + The C extension speeds up BSON encoding, but is not essential. + """ + + warning_message = """ +******************************************************************** +WARNING: %s could not +be compiled. No C extensions are essential for PyMongo to run, +although they do result in significant speed improvements. +%s + +Please see the installation docs for solutions to build issues: + +https://pymongo.readthedocs.io/en/stable/installation.html + +Here are some hints for popular operating systems: + +If you are seeing this message on Linux you probably need to +install GCC and/or the Python development package for your +version of Python. + +Debian and Ubuntu users should issue the following command: + + $ sudo apt-get install build-essential python-dev + +Users of Red Hat based distributions (RHEL, CentOS, Amazon Linux, +Oracle Linux, Fedora, etc.) should issue the following command: + + $ sudo yum install gcc python-devel + +If you are seeing this message on Microsoft Windows please install +PyMongo using pip. Modern versions of pip will install PyMongo +from binary wheels available on pypi. If you must install from +source read the documentation here: + +https://pymongo.readthedocs.io/en/stable/installation.html#installing-from-source-on-windows + +If you are seeing this message on macOS / OSX please install PyMongo +using pip. 
Modern versions of pip will install PyMongo from binary +wheels available on pypi. If wheels are not available for your version +of macOS / OSX, or you must install from source read the documentation +here: + +https://pymongo.readthedocs.io/en/stable/installation.html#osx +******************************************************************** +""" + + def run(self): + try: + build_ext.run(self) + except Exception: + if os.environ.get("PYMONGO_C_EXT_MUST_BUILD"): + raise + e = sys.exc_info()[1] + sys.stdout.write("%s\n" % str(e)) + warnings.warn( + self.warning_message + % ( + "Extension modules", + "There was an issue with your platform configuration - see above.", + ), + stacklevel=2, + ) + + def build_extension(self, ext): + name = ext.name + try: + build_ext.build_extension(self, ext) + except Exception: + if os.environ.get("PYMONGO_C_EXT_MUST_BUILD"): + raise + e = sys.exc_info()[1] + sys.stdout.write("%s\n" % str(e)) + warnings.warn( + self.warning_message + % ( + "The %s extension module" % (name,), # noqa: UP031 + "The output above this warning shows how the compilation failed.", + ), + stacklevel=2, + ) + + +ext_modules = [ + Extension( + "bson._cbson", + include_dirs=["bson"], + sources=["bson/_cbsonmodule.c", "bson/time64.c", "bson/buffer.c"], + ), + Extension( + "pymongo._cmessage", + include_dirs=["bson"], + sources=[ + "pymongo/_cmessagemodule.c", + "bson/_cbsonmodule.c", + "bson/time64.c", + "bson/buffer.c", + ], + ), +] + + +if "--no_ext" in sys.argv or os.environ.get("NO_EXT"): + try: + sys.argv.remove("--no_ext") + except ValueError: + pass + ext_modules = [] +elif sys.platform.startswith("java") or sys.platform == "cli" or "PyPy" in sys.version: + sys.stdout.write( + """ +*****************************************************\n +The optional C extensions are currently not supported\n +by this python implementation.\n +*****************************************************\n +""" + ) + ext_modules = [] + +setup( + cmdclass={"build_ext": 
custom_build_ext}, + ext_modules=ext_modules, + packages=["bson", "pymongo", "gridfs"], +) # type:ignore diff --git a/bson/__init__.py b/bson/__init__.py index 2c4bd3a8b2..a7c9ddc509 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -886,18 +886,11 @@ def _encode_maxkey(name: bytes, dummy0: Any, dummy1: Any, dummy2: Any) -> bytes: _abc.Mapping: _encode_mapping, } - -_MARKERS = { - 5: _encode_binary, - 7: _encode_objectid, - 11: _encode_regex, - 13: _encode_code, - 17: _encode_timestamp, - 18: _encode_long, - 100: _encode_dbref, - 127: _encode_maxkey, - 255: _encode_minkey, -} +# Map each _type_marker to its encoder for faster lookup. +_MARKERS = {} +for _typ in _ENCODERS: + if hasattr(_typ, "_type_marker"): + _MARKERS[_typ._type_marker] = _ENCODERS[_typ] _BUILT_IN_TYPES = tuple(t for t in _ENCODERS) @@ -1026,12 +1019,11 @@ def encode( :class:`~bson.errors.InvalidDocument` if `document` cannot be converted to :class:`BSON`. - :Parameters: - - `document`: mapping type representing a document - - `check_keys` (optional): check if keys start with '$' or + :param document: mapping type representing a document + :param check_keys: check if keys start with '$' or contain '.', raising :class:`~bson.errors.InvalidDocument` in either case - - `codec_options` (optional): An instance of + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. .. versionadded:: 3.9 @@ -1072,10 +1064,9 @@ def decode( >>> type(decoded_doc) - :Parameters: - - `data`: the BSON to decode. Any bytes-like object that implements + :param data: the BSON to decode. Any bytes-like object that implements the buffer protocol. - - `codec_options` (optional): An instance of + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. .. versionadded:: 3.9 @@ -1141,9 +1132,8 @@ def decode_all( `data` must be a bytes-like object implementing the buffer protocol that provides concatenated, valid, BSON-encoded documents. 
- :Parameters: - - `data`: BSON data - - `codec_options` (optional): An instance of + :param data: BSON data + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. .. versionchanged:: 3.9 @@ -1238,20 +1228,18 @@ def _decode_all_selective( `data` must be a string representing a valid, BSON-encoded document. - :Parameters: - - `data`: BSON data - - `codec_options`: An instance of + :param data: BSON data + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions` with user-specified type decoders. If no decoders are found, this method is the same as ``decode_all``. - - `fields`: Map of document namespaces where data that needs + :param fields: Map of document namespaces where data that needs to be custom decoded lives or None. For example, to custom decode a list of objects in 'field1.subfield1', the specified value should be ``{'field1': {'subfield1': 1}}``. If ``fields`` is an empty map or None, this method is the same as ``decode_all``. - :Returns: - - `document_list`: Single-member list containing the decoded document. + :return: Single-member list containing the decoded document. .. versionadded:: 3.8 """ @@ -1298,9 +1286,8 @@ def decode_iter( `data` must be a string of concatenated, valid, BSON-encoded documents. - :Parameters: - - `data`: BSON data - - `codec_options` (optional): An instance of + :param data: BSON data + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. .. versionchanged:: 3.0 @@ -1346,9 +1333,8 @@ def decode_file_iter( Works similarly to the decode_all function, but reads from the file object in chunks and parses bson in chunks, yielding one document at a time. - :Parameters: - - `file_obj`: A file object containing BSON data. - - `codec_options` (optional): An instance of + :param file_obj: A file object containing BSON data. + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. .. 
versionchanged:: 3.0 @@ -1377,8 +1363,7 @@ def is_valid(bson: bytes) -> bool: :class:`bytes`. Returns ``True`` if `bson` is valid :class:`BSON`, ``False`` otherwise. - :Parameters: - - `bson`: the data to be validated + :param bson: the data to be validated """ if not isinstance(bson, bytes): raise TypeError("BSON data must be an instance of a subclass of bytes") @@ -1414,12 +1399,11 @@ def encode( :class:`str'. Raises :class:`~bson.errors.InvalidDocument` if `document` cannot be converted to :class:`BSON`. - :Parameters: - - `document`: mapping type representing a document - - `check_keys` (optional): check if keys start with '$' or + :param document: mapping type representing a document + :param check_keys: check if keys start with '$' or contain '.', raising :class:`~bson.errors.InvalidDocument` in either case - - `codec_options` (optional): An instance of + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. .. versionchanged:: 3.0 @@ -1447,8 +1431,7 @@ def decode( # type:ignore[override] >>> type(decoded_doc) - :Parameters: - - `codec_options` (optional): An instance of + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. .. versionchanged:: 3.0 diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 4e1881a275..3b3aecc441 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -342,6 +342,9 @@ static long long millis_from_datetime(PyObject* datetime) { static PyObject* datetime_ms_from_millis(PyObject* self, long long millis){ // Allocate a new DatetimeMS object. struct module_state *state = GETSTATE(self); + if (!state) { + return NULL; + } PyObject* dt; PyObject* ll_millis; @@ -455,49 +458,6 @@ static int write_string(buffer_t buffer, PyObject* py_string) { return 1; } -/* - * Are we in the main interpreter or a sub-interpreter? - * Useful for deciding if we can use cached pure python - * types in mod_wsgi. 
- */ -static int -_in_main_interpreter(void) { - static PyInterpreterState* main_interpreter = NULL; - PyInterpreterState* interpreter; - - if (main_interpreter == NULL) { - interpreter = PyInterpreterState_Head(); - - while (PyInterpreterState_Next(interpreter)) - interpreter = PyInterpreterState_Next(interpreter); - - main_interpreter = interpreter; - } - - return (main_interpreter == PyThreadState_Get()->interp); -} - -/* - * Get a reference to a pure python type. If we are in the - * main interpreter return the cached object, otherwise import - * the object we need and return it instead. - */ -static PyObject* -_get_object(PyObject* object, char* module_name, char* object_name) { - if (_in_main_interpreter()) { - Py_XINCREF(object); - return object; - } else { - PyObject* imported = NULL; - PyObject* module = PyImport_ImportModule(module_name); - if (!module) - return NULL; - imported = PyObject_GetAttrString(module, object_name); - Py_DECREF(module); - return imported; - } -} - /* Load a Python object to cache. * * Returns non-zero on failure. */ @@ -523,6 +483,9 @@ static int _load_python_objects(PyObject* module) { PyObject* re_compile = NULL; PyObject* compiled = NULL; struct module_state *state = GETSTATE(module); + if (!state) { + return 1; + } /* Cache commonly used attribute names to improve performance. 
*/ if (!((state->_type_marker_str = PyUnicode_FromString("_type_marker")) && @@ -681,6 +644,9 @@ int convert_codec_options(PyObject* self, PyObject* options_obj, codec_options_t PyObject* type_registry_obj = NULL; struct module_state *state = GETSTATE(self); long type_marker; + if (!state) { + return 0; + } options->unicode_decode_error_handler = NULL; @@ -896,18 +862,18 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, const codec_options_t* options, unsigned char in_custom_call, unsigned char in_fallback_call) { - struct module_state *state = GETSTATE(self); - PyObject* mapping_type; PyObject* new_value = NULL; int retval; - PyObject* uuid_type; int is_list; + long type; + struct module_state *state = GETSTATE(self); + if (!state) { + return 0; + } /* - * Don't use PyObject_IsInstance for our custom types. It causes - * problems with python sub interpreters. Our custom types should - * have a _type_marker attribute, which we can switch on instead. + * Use _type_marker attribute instead of PyObject_IsInstance for better perf. 
*/ - long type = _type_marker(value, state->_type_marker_str); + type = _type_marker(value, state->_type_marker_str); if (type < 0) { return 0; } @@ -987,6 +953,16 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x07; return 1; } + case 9: + { + /* DatetimeMS */ + long long millis; + if (!millis_from_datetime_ms(value, &millis)) { + return 0; + } + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x09; + return buffer_write_int64(buffer, (int64_t)millis); + } case 11: { /* Regex */ @@ -1282,58 +1258,32 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x09; return buffer_write_int64(buffer, (int64_t)millis); - } else if (PyObject_TypeCheck(value, (PyTypeObject *) state->DatetimeMS)) { - long long millis; - if (!millis_from_datetime_ms(value, &millis)) { - return 0; - } - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x09; - return buffer_write_int64(buffer, (int64_t)millis); } else if (PyObject_TypeCheck(value, state->REType)) { return _write_regex_to_buffer(buffer, type_byte, value, state->_flags_str, state->_pattern_str); - } - - /* - * Try Mapping and UUID last since we have to import - * them if we're in a sub-interpreter. 
- */ - mapping_type = _get_object(state->Mapping, "collections.abc", "Mapping"); - if (mapping_type && PyObject_IsInstance(value, mapping_type)) { - Py_DECREF(mapping_type); + } else if (PyObject_IsInstance(value, state->Mapping)) { /* PyObject_IsInstance returns -1 on error */ if (PyErr_Occurred()) { return 0; } *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; return write_dict(self, buffer, value, check_keys, options, 0); - } - - uuid_type = _get_object(state->UUID, "uuid", "UUID"); - if (uuid_type && PyObject_IsInstance(value, uuid_type)) { - PyObject* binary_type = NULL; + } else if (PyObject_IsInstance(value, state->UUID)) { PyObject* binary_value = NULL; PyObject *uuid_rep_obj = NULL; int result; - Py_DECREF(uuid_type); /* PyObject_IsInstance returns -1 on error */ if (PyErr_Occurred()) { return 0; } - binary_type = _get_object(state->Binary, "bson", "Binary"); - if (binary_type == NULL) { - return 0; - } - if (!(uuid_rep_obj = PyLong_FromLong(options->uuid_rep))) { return 0; } - binary_value = PyObject_CallMethodObjArgs(binary_type, state->_from_uuid_str, value, uuid_rep_obj, NULL); + binary_value = PyObject_CallMethodObjArgs(state->Binary, state->_from_uuid_str, value, uuid_rep_obj, NULL); Py_DECREF(uuid_rep_obj); if (binary_value == NULL) { - Py_DECREF(binary_type); return 0; } @@ -1342,12 +1292,9 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, check_keys, options, in_custom_call, in_fallback_call); - Py_DECREF(binary_type); Py_DECREF(binary_value); return result; } - Py_XDECREF(mapping_type); - Py_XDECREF(uuid_type); /* Try a custom encoder if one is provided and we have not already * attempted to use a type encoder. 
*/ @@ -1565,9 +1512,11 @@ int write_dict(PyObject* self, buffer_t buffer, int length; int length_location; struct module_state *state = GETSTATE(self); - PyObject* mapping_type; long type_marker; int is_dict = PyDict_Check(dict); + if (!state) { + return 0; + } if (!is_dict) { /* check for RawBSONDocument */ @@ -1580,39 +1529,33 @@ int write_dict(PyObject* self, buffer_t buffer, return write_raw_doc(buffer, dict, state->_raw_str); } - mapping_type = _get_object(state->Mapping, "collections.abc", "Mapping"); - - if (mapping_type) { - if (!PyObject_IsInstance(dict, mapping_type)) { - PyObject* repr; - Py_DECREF(mapping_type); - if ((repr = PyObject_Repr(dict))) { - PyObject* errmsg = PyUnicode_FromString( - "encoder expected a mapping type but got: "); - if (errmsg) { - PyObject* error = PyUnicode_Concat(errmsg, repr); - if (error) { - PyErr_SetObject(PyExc_TypeError, error); - Py_DECREF(error); - } - Py_DECREF(errmsg); - Py_DECREF(repr); - } - else { - Py_DECREF(repr); + if (!PyObject_IsInstance(dict, state->Mapping)) { + PyObject* repr; + if ((repr = PyObject_Repr(dict))) { + PyObject* errmsg = PyUnicode_FromString( + "encoder expected a mapping type but got: "); + if (errmsg) { + PyObject* error = PyUnicode_Concat(errmsg, repr); + if (error) { + PyErr_SetObject(PyExc_TypeError, error); + Py_DECREF(error); } - } else { - PyErr_SetString(PyExc_TypeError, - "encoder expected a mapping type"); + Py_DECREF(errmsg); + Py_DECREF(repr); } - - return 0; - } - Py_DECREF(mapping_type); - /* PyObject_IsInstance returns -1 on error */ - if (PyErr_Occurred()) { - return 0; + else { + Py_DECREF(repr); + } + } else { + PyErr_SetString(PyExc_TypeError, + "encoder expected a mapping type"); } + + return 0; + } + /* PyObject_IsInstance returns -1 on error */ + if (PyErr_Occurred()) { + return 0; } } @@ -1711,6 +1654,9 @@ static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { PyObject* raw_bson_document_bytes_obj; long type_marker; struct module_state *state = 
GETSTATE(self); + if (!state) { + return NULL; + } if (!(PyArg_ParseTuple(args, "ObO|b", &dict, &check_keys, &options_obj, &top_level) && @@ -1757,13 +1703,14 @@ static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { */ static PyObject *_dbref_hook(PyObject* self, PyObject* value) { struct module_state *state = GETSTATE(self); - PyObject* dbref = NULL; - PyObject* dbref_type = NULL; PyObject* ref = NULL; PyObject* id = NULL; PyObject* database = NULL; PyObject* ret = NULL; int db_present = 0; + if (!state) { + return NULL; + } /* Decoding for DBRefs */ if (PyMapping_HasKey(value, state->_dollar_ref_str) && PyMapping_HasKey(value, state->_dollar_id_str)) { /* DBRef */ @@ -1801,16 +1748,12 @@ static PyObject *_dbref_hook(PyObject* self, PyObject* value) { PyMapping_DelItem(value, state->_dollar_db_str); } - if ((dbref_type = _get_object(state->DBRef, "bson.dbref", "DBRef"))) { - dbref = PyObject_CallFunctionObjArgs(dbref_type, ref, id, database, value, NULL); - Py_DECREF(value); - ret = dbref; - } + ret = PyObject_CallFunctionObjArgs(state->DBRef, ref, id, database, value, NULL); + Py_DECREF(value); } else { ret = value; } invalid: - Py_XDECREF(dbref_type); Py_XDECREF(ref); Py_XDECREF(id); Py_XDECREF(database); @@ -1822,6 +1765,9 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, unsigned max, const codec_options_t* options, int raw_array) { struct module_state *state = GETSTATE(self); PyObject* value = NULL; + if (!state) { + return NULL; + } switch (type) { case 1: { @@ -1878,23 +1824,17 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, goto invalid; } + value = elements_to_dict(self, buffer + *position, + size, options); + if (!value) { + goto invalid; + } + if (options->is_raw_bson) { - value = PyObject_CallFunction( - options->document_class, "y#O", - buffer + *position, (Py_ssize_t)size, options->options_obj); - if (!value) { - goto invalid; - } *position += size; break; } - value = 
elements_to_dict(self, buffer + *position + 4, - size - 5, options); - if (!value) { - goto invalid; - } - /* Hook for DBRefs */ value = _dbref_hook(self, value); if (!value) { @@ -1976,7 +1916,6 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, { PyObject* data; PyObject* st; - PyObject* type_to_create; uint32_t length, length2; unsigned char subtype; @@ -2017,7 +1956,6 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } /* Encode as UUID or Binary based on options->uuid_rep */ if (subtype == 3 || subtype == 4) { - PyObject* binary_type = NULL; PyObject* binary_value = NULL; char uuid_rep = options->uuid_rep; @@ -2026,12 +1964,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, goto uuiderror; } - binary_type = _get_object(state->Binary, "bson", "Binary"); - if (binary_type == NULL) { - goto uuiderror; - } - - binary_value = PyObject_CallFunction(binary_type, "(Oi)", data, subtype); + binary_value = PyObject_CallFunction(state->Binary, "(Oi)", data, subtype); if (binary_value == NULL) { goto uuiderror; } @@ -2051,7 +1984,6 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } uuiderror: - Py_XDECREF(binary_type); Py_XDECREF(binary_value); Py_DECREF(data); if (!value) { @@ -2066,10 +1998,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, Py_DECREF(data); goto invalid; } - if ((type_to_create = _get_object(state->Binary, "bson.binary", "Binary"))) { - value = PyObject_CallFunctionObjArgs(type_to_create, data, st, NULL); - Py_DECREF(type_to_create); - } + value = PyObject_CallFunctionObjArgs(state->Binary, data, st, NULL); Py_DECREF(st); Py_DECREF(data); if (!value) { @@ -2087,15 +2016,10 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } case 7: { - PyObject* objectid_type; if (max < 12) { goto invalid; } - if ((objectid_type = _get_object(state->ObjectId, "bson.objectid", 
"ObjectId"))) { - value = PyObject_CallFunction(objectid_type, "y#", - buffer + *position, (Py_ssize_t)12); - Py_DECREF(objectid_type); - } + value = PyObject_CallFunction(state->ObjectId, "y#", buffer + *position, (Py_ssize_t)12); *position += 12; break; } @@ -2119,7 +2043,6 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } case 9: { - PyObject* utc_type; PyObject* naive; PyObject* replace; PyObject* args; @@ -2143,36 +2066,24 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, if (dt_clamp || dt_auto){ - PyObject *min_millis_fn = _get_object(state->_min_datetime_ms, "bson.datetime_ms", "_min_datetime_ms"); - PyObject *max_millis_fn = _get_object(state->_max_datetime_ms, "bson.datetime_ms", "_max_datetime_ms"); PyObject *min_millis_fn_res; PyObject *max_millis_fn_res; int64_t min_millis; int64_t max_millis; - if (min_millis_fn == NULL || max_millis_fn == NULL) { - Py_XDECREF(min_millis_fn); - Py_XDECREF(max_millis_fn); - goto invalid; - } - if (options->tz_aware){ PyObject* tzinfo = options->tzinfo; if (tzinfo == Py_None) { // Default to UTC. 
- utc_type = _get_object(state->UTC, "bson.tz_util", "utc"); - tzinfo = utc_type; + tzinfo = state->UTC; } - min_millis_fn_res = PyObject_CallFunctionObjArgs(min_millis_fn, tzinfo, NULL); - max_millis_fn_res = PyObject_CallFunctionObjArgs(max_millis_fn, tzinfo, NULL); + min_millis_fn_res = PyObject_CallFunctionObjArgs(state->_min_datetime_ms, tzinfo, NULL); + max_millis_fn_res = PyObject_CallFunctionObjArgs(state->_max_datetime_ms, tzinfo, NULL); } else { - min_millis_fn_res = PyObject_CallObject(min_millis_fn, NULL); - max_millis_fn_res = PyObject_CallObject(max_millis_fn, NULL); + min_millis_fn_res = PyObject_CallObject(state->_min_datetime_ms, NULL); + max_millis_fn_res = PyObject_CallObject(state->_max_datetime_ms, NULL); } - Py_DECREF(min_millis_fn); - Py_DECREF(max_millis_fn); - if (!min_millis_fn_res || !max_millis_fn_res){ Py_XDECREF(min_millis_fn_res); Py_XDECREF(max_millis_fn_res); @@ -2228,15 +2139,12 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, Py_DECREF(args); goto invalid; } - utc_type = _get_object(state->UTC, "bson.tz_util", "utc"); - if (!utc_type || PyDict_SetItem(kwargs, state->_tzinfo_str, utc_type) == -1) { + if (PyDict_SetItem(kwargs, state->_tzinfo_str, state->UTC) == -1) { Py_DECREF(replace); Py_DECREF(args); Py_DECREF(kwargs); - Py_XDECREF(utc_type); goto invalid; } - Py_XDECREF(utc_type); value = PyObject_Call(replace, args, kwargs); if (!value) { Py_DECREF(replace); @@ -2266,7 +2174,6 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } case 11: { - PyObject* regex_class; PyObject* pattern; int flags; size_t flags_length, i; @@ -2309,12 +2216,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } *position += (unsigned)flags_length + 1; - regex_class = _get_object(state->Regex, "bson.regex", "Regex"); - if (regex_class) { - value = PyObject_CallFunction(regex_class, - "Oi", pattern, flags); - Py_DECREF(regex_class); - } + value = 
PyObject_CallFunction(state->Regex, "Oi", pattern, flags); Py_DECREF(pattern); break; } @@ -2323,8 +2225,6 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, uint32_t coll_length; PyObject* collection; PyObject* id = NULL; - PyObject* objectid_type; - PyObject* dbref_type; if (max < 4) { goto invalid; @@ -2349,20 +2249,13 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } *position += coll_length; - if ((objectid_type = _get_object(state->ObjectId, "bson.objectid", "ObjectId"))) { - id = PyObject_CallFunction(objectid_type, "y#", - buffer + *position, (Py_ssize_t)12); - Py_DECREF(objectid_type); - } + id = PyObject_CallFunction(state->ObjectId, "y#", buffer + *position, (Py_ssize_t)12); if (!id) { Py_DECREF(collection); goto invalid; } *position += 12; - if ((dbref_type = _get_object(state->DBRef, "bson.dbref", "DBRef"))) { - value = PyObject_CallFunctionObjArgs(dbref_type, collection, id, NULL); - Py_DECREF(dbref_type); - } + value = PyObject_CallFunctionObjArgs(state->DBRef, collection, id, NULL); Py_DECREF(collection); Py_DECREF(id); break; @@ -2370,7 +2263,6 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, case 13: { PyObject* code; - PyObject* code_type; uint32_t value_length; if (max < 4) { goto invalid; @@ -2393,10 +2285,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, goto invalid; } *position += value_length; - if ((code_type = _get_object(state->Code, "bson.code", "Code"))) { - value = PyObject_CallFunctionObjArgs(code_type, code, NULL, NULL); - Py_DECREF(code_type); - } + value = PyObject_CallFunctionObjArgs(state->Code, code, NULL, NULL); Py_DECREF(code); break; } @@ -2405,9 +2294,9 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, uint32_t c_w_s_size; uint32_t code_size; uint32_t scope_size; + uint32_t len; PyObject* code; PyObject* scope; - PyObject* code_type; if (max < 8) { goto invalid; 
@@ -2424,7 +2313,8 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, memcpy(&code_size, buffer + *position, 4); code_size = BSON_UINT32_FROM_LE(code_size); /* code_w_scope length + code length + code + scope length */ - if (!code_size || max < code_size || max < 4 + 4 + code_size + 4) { + len = 4 + 4 + code_size + 4; + if (!code_size || max < code_size || max < len || len < code_size) { goto invalid; } *position += 4; @@ -2442,12 +2332,9 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, memcpy(&scope_size, buffer + *position, 4); scope_size = BSON_UINT32_FROM_LE(scope_size); - if (scope_size < BSON_MIN_SIZE) { - Py_DECREF(code); - goto invalid; - } /* code length + code + scope length + scope */ - if ((4 + code_size + 4 + scope_size) != c_w_s_size) { + len = 4 + 4 + code_size + scope_size; + if (scope_size < BSON_MIN_SIZE || len != c_w_s_size || len < scope_size) { Py_DECREF(code); goto invalid; } @@ -2456,18 +2343,15 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, if (buffer[*position + scope_size - 1]) { goto invalid; } - scope = elements_to_dict(self, buffer + *position + 4, - scope_size - 5, options); + scope = elements_to_dict(self, buffer + *position, + scope_size, options); if (!scope) { Py_DECREF(code); goto invalid; } *position += scope_size; - if ((code_type = _get_object(state->Code, "bson.code", "Code"))) { - value = PyObject_CallFunctionObjArgs(code_type, code, scope, NULL); - Py_DECREF(code_type); - } + value = PyObject_CallFunctionObjArgs(state->Code, code, scope, NULL); Py_DECREF(code); Py_DECREF(scope); break; @@ -2490,7 +2374,6 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, case 17: { uint32_t time, inc; - PyObject* timestamp_type; if (max < 8) { goto invalid; } @@ -2498,68 +2381,44 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, memcpy(&time, buffer + *position + 4, 4); inc = 
BSON_UINT32_FROM_LE(inc); time = BSON_UINT32_FROM_LE(time); - if ((timestamp_type = _get_object(state->Timestamp, "bson.timestamp", "Timestamp"))) { - value = PyObject_CallFunction(timestamp_type, "II", time, inc); - Py_DECREF(timestamp_type); - } + value = PyObject_CallFunction(state->Timestamp, "II", time, inc); *position += 8; break; } case 18: { int64_t ll; - PyObject* bson_int64_type = _get_object(state->BSONInt64, - "bson.int64", "Int64"); - if (!bson_int64_type) - goto invalid; if (max < 8) { - Py_DECREF(bson_int64_type); goto invalid; } memcpy(&ll, buffer + *position, 8); ll = (int64_t)BSON_UINT64_FROM_LE(ll); - value = PyObject_CallFunction(bson_int64_type, "L", ll); + value = PyObject_CallFunction(state->BSONInt64, "L", ll); *position += 8; - Py_DECREF(bson_int64_type); break; } case 19: { - PyObject* dec128; if (max < 16) { goto invalid; } - if ((dec128 = _get_object(state->Decimal128, - "bson.decimal128", - "Decimal128"))) { - PyObject *_bytes_obj = PyBytes_FromStringAndSize(buffer + *position, (Py_ssize_t)16); - if (!_bytes_obj) { - Py_DECREF(dec128); - goto invalid; - } - value = PyObject_CallMethodObjArgs(dec128, state->_from_bid_str, _bytes_obj, NULL); - Py_DECREF(dec128); - Py_DECREF(_bytes_obj); + PyObject *_bytes_obj = PyBytes_FromStringAndSize(buffer + *position, (Py_ssize_t)16); + if (!_bytes_obj) { + goto invalid; } + value = PyObject_CallMethodObjArgs(state->Decimal128, state->_from_bid_str, _bytes_obj, NULL); + Py_DECREF(_bytes_obj); *position += 16; break; } case 255: { - PyObject* minkey_type = _get_object(state->MinKey, "bson.min_key", "MinKey"); - if (!minkey_type) - goto invalid; - value = PyObject_CallFunctionObjArgs(minkey_type, NULL); - Py_DECREF(minkey_type); + value = PyObject_CallFunctionObjArgs(state->MinKey, NULL); break; } case 127: { - PyObject* maxkey_type = _get_object(state->MaxKey, "bson.max_key", "MaxKey"); - if (!maxkey_type) - goto invalid; - value = PyObject_CallFunctionObjArgs(maxkey_type, NULL); - 
Py_DECREF(maxkey_type); + value = PyObject_CallFunctionObjArgs(state->MaxKey, NULL); break; } default: @@ -2809,9 +2668,14 @@ static PyObject* elements_to_dict(PyObject* self, const char* string, unsigned max, const codec_options_t* options) { PyObject* result; + if (options->is_raw_bson) { + return PyObject_CallFunction( + options->document_class, "y#O", + string, max, options->options_obj); + } if (Py_EnterRecursiveCall(" while decoding a BSON document")) return NULL; - result = _elements_to_dict(self, string, max, options); + result = _elements_to_dict(self, string + 4, max - 5, options); Py_LeaveRecursiveCall(); return result; } @@ -2902,15 +2766,7 @@ static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { goto done; } - /* No need to decode fields if using RawBSONDocument */ - if (options.is_raw_bson) { - result = PyObject_CallFunction( - options.document_class, "y#O", string, (Py_ssize_t)size, - options_obj); - } - else { - result = elements_to_dict(self, string + 4, (unsigned)size - 5, &options); - } + result = elements_to_dict(self, string, (unsigned)size, &options); done: PyBuffer_Release(&view); destroy_codec_options(&options); @@ -2988,14 +2844,7 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { goto fail; } - /* No need to decode fields if using RawBSONDocument. 
*/ - if (options.is_raw_bson) { - dict = PyObject_CallFunction( - options.document_class, "y#O", string, (Py_ssize_t)size, - options_obj); - } else { - dict = elements_to_dict(self, string + 4, (unsigned)size - 5, &options); - } + dict = elements_to_dict(self, string, (unsigned)size, &options); if (!dict) { Py_DECREF(result); goto fail; @@ -3150,103 +2999,101 @@ static PyMethodDef _CBSONMethods[] = { {NULL, NULL, 0, NULL} }; -#define INITERROR return NULL +#define INITERROR return -1; static int _cbson_traverse(PyObject *m, visitproc visit, void *arg) { - Py_VISIT(GETSTATE(m)->Binary); - Py_VISIT(GETSTATE(m)->Code); - Py_VISIT(GETSTATE(m)->ObjectId); - Py_VISIT(GETSTATE(m)->DBRef); - Py_VISIT(GETSTATE(m)->Regex); - Py_VISIT(GETSTATE(m)->UUID); - Py_VISIT(GETSTATE(m)->Timestamp); - Py_VISIT(GETSTATE(m)->MinKey); - Py_VISIT(GETSTATE(m)->MaxKey); - Py_VISIT(GETSTATE(m)->UTC); - Py_VISIT(GETSTATE(m)->REType); - Py_VISIT(GETSTATE(m)->_type_marker_str); - Py_VISIT(GETSTATE(m)->_flags_str); - Py_VISIT(GETSTATE(m)->_pattern_str); - Py_VISIT(GETSTATE(m)->_encoder_map_str); - Py_VISIT(GETSTATE(m)->_decoder_map_str); - Py_VISIT(GETSTATE(m)->_fallback_encoder_str); - Py_VISIT(GETSTATE(m)->_raw_str); - Py_VISIT(GETSTATE(m)->_subtype_str); - Py_VISIT(GETSTATE(m)->_binary_str); - Py_VISIT(GETSTATE(m)->_scope_str); - Py_VISIT(GETSTATE(m)->_inc_str); - Py_VISIT(GETSTATE(m)->_time_str); - Py_VISIT(GETSTATE(m)->_bid_str); - Py_VISIT(GETSTATE(m)->_replace_str); - Py_VISIT(GETSTATE(m)->_astimezone_str); - Py_VISIT(GETSTATE(m)->_id_str); - Py_VISIT(GETSTATE(m)->_dollar_ref_str); - Py_VISIT(GETSTATE(m)->_dollar_id_str); - Py_VISIT(GETSTATE(m)->_dollar_db_str); - Py_VISIT(GETSTATE(m)->_tzinfo_str); - Py_VISIT(GETSTATE(m)->_as_doc_str); - Py_VISIT(GETSTATE(m)->_utcoffset_str); - Py_VISIT(GETSTATE(m)->_from_uuid_str); - Py_VISIT(GETSTATE(m)->_as_uuid_str); - Py_VISIT(GETSTATE(m)->_from_bid_str); + struct module_state *state = GETSTATE(m); + if (!state) { + return 0; + } + 
Py_VISIT(state->Binary); + Py_VISIT(state->Code); + Py_VISIT(state->ObjectId); + Py_VISIT(state->DBRef); + Py_VISIT(state->Regex); + Py_VISIT(state->UUID); + Py_VISIT(state->Timestamp); + Py_VISIT(state->MinKey); + Py_VISIT(state->MaxKey); + Py_VISIT(state->UTC); + Py_VISIT(state->REType); + Py_VISIT(state->_type_marker_str); + Py_VISIT(state->_flags_str); + Py_VISIT(state->_pattern_str); + Py_VISIT(state->_encoder_map_str); + Py_VISIT(state->_decoder_map_str); + Py_VISIT(state->_fallback_encoder_str); + Py_VISIT(state->_raw_str); + Py_VISIT(state->_subtype_str); + Py_VISIT(state->_binary_str); + Py_VISIT(state->_scope_str); + Py_VISIT(state->_inc_str); + Py_VISIT(state->_time_str); + Py_VISIT(state->_bid_str); + Py_VISIT(state->_replace_str); + Py_VISIT(state->_astimezone_str); + Py_VISIT(state->_id_str); + Py_VISIT(state->_dollar_ref_str); + Py_VISIT(state->_dollar_id_str); + Py_VISIT(state->_dollar_db_str); + Py_VISIT(state->_tzinfo_str); + Py_VISIT(state->_as_doc_str); + Py_VISIT(state->_utcoffset_str); + Py_VISIT(state->_from_uuid_str); + Py_VISIT(state->_as_uuid_str); + Py_VISIT(state->_from_bid_str); return 0; } static int _cbson_clear(PyObject *m) { - Py_CLEAR(GETSTATE(m)->Binary); - Py_CLEAR(GETSTATE(m)->Code); - Py_CLEAR(GETSTATE(m)->ObjectId); - Py_CLEAR(GETSTATE(m)->DBRef); - Py_CLEAR(GETSTATE(m)->Regex); - Py_CLEAR(GETSTATE(m)->UUID); - Py_CLEAR(GETSTATE(m)->Timestamp); - Py_CLEAR(GETSTATE(m)->MinKey); - Py_CLEAR(GETSTATE(m)->MaxKey); - Py_CLEAR(GETSTATE(m)->UTC); - Py_CLEAR(GETSTATE(m)->REType); - Py_CLEAR(GETSTATE(m)->_type_marker_str); - Py_CLEAR(GETSTATE(m)->_flags_str); - Py_CLEAR(GETSTATE(m)->_pattern_str); - Py_CLEAR(GETSTATE(m)->_encoder_map_str); - Py_CLEAR(GETSTATE(m)->_decoder_map_str); - Py_CLEAR(GETSTATE(m)->_fallback_encoder_str); - Py_CLEAR(GETSTATE(m)->_raw_str); - Py_CLEAR(GETSTATE(m)->_subtype_str); - Py_CLEAR(GETSTATE(m)->_binary_str); - Py_CLEAR(GETSTATE(m)->_scope_str); - Py_CLEAR(GETSTATE(m)->_inc_str); - 
Py_CLEAR(GETSTATE(m)->_time_str); - Py_CLEAR(GETSTATE(m)->_bid_str); - Py_CLEAR(GETSTATE(m)->_replace_str); - Py_CLEAR(GETSTATE(m)->_astimezone_str); - Py_CLEAR(GETSTATE(m)->_id_str); - Py_CLEAR(GETSTATE(m)->_dollar_ref_str); - Py_CLEAR(GETSTATE(m)->_dollar_id_str); - Py_CLEAR(GETSTATE(m)->_dollar_db_str); - Py_CLEAR(GETSTATE(m)->_tzinfo_str); - Py_CLEAR(GETSTATE(m)->_as_doc_str); - Py_CLEAR(GETSTATE(m)->_utcoffset_str); - Py_CLEAR(GETSTATE(m)->_from_uuid_str); - Py_CLEAR(GETSTATE(m)->_as_uuid_str); - Py_CLEAR(GETSTATE(m)->_from_bid_str); + struct module_state *state = GETSTATE(m); + if (!state) { + return 0; + } + Py_CLEAR(state->Binary); + Py_CLEAR(state->Code); + Py_CLEAR(state->ObjectId); + Py_CLEAR(state->DBRef); + Py_CLEAR(state->Regex); + Py_CLEAR(state->UUID); + Py_CLEAR(state->Timestamp); + Py_CLEAR(state->MinKey); + Py_CLEAR(state->MaxKey); + Py_CLEAR(state->UTC); + Py_CLEAR(state->REType); + Py_CLEAR(state->_type_marker_str); + Py_CLEAR(state->_flags_str); + Py_CLEAR(state->_pattern_str); + Py_CLEAR(state->_encoder_map_str); + Py_CLEAR(state->_decoder_map_str); + Py_CLEAR(state->_fallback_encoder_str); + Py_CLEAR(state->_raw_str); + Py_CLEAR(state->_subtype_str); + Py_CLEAR(state->_binary_str); + Py_CLEAR(state->_scope_str); + Py_CLEAR(state->_inc_str); + Py_CLEAR(state->_time_str); + Py_CLEAR(state->_bid_str); + Py_CLEAR(state->_replace_str); + Py_CLEAR(state->_astimezone_str); + Py_CLEAR(state->_id_str); + Py_CLEAR(state->_dollar_ref_str); + Py_CLEAR(state->_dollar_id_str); + Py_CLEAR(state->_dollar_db_str); + Py_CLEAR(state->_tzinfo_str); + Py_CLEAR(state->_as_doc_str); + Py_CLEAR(state->_utcoffset_str); + Py_CLEAR(state->_from_uuid_str); + Py_CLEAR(state->_as_uuid_str); + Py_CLEAR(state->_from_bid_str); return 0; } -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_cbson", - NULL, - sizeof(struct module_state), - _CBSONMethods, - NULL, - _cbson_traverse, - _cbson_clear, - NULL -}; - -PyMODINIT_FUNC -PyInit__cbson(void) +/* 
Multi-phase extension module initialization code. + * See https://peps.python.org/pep-0489/. +*/ +static int +_cbson_exec(PyObject *m) { - PyObject *m; PyObject *c_api_object; static void *_cbson_API[_cbson_API_POINTER_COUNT]; @@ -3273,12 +3120,6 @@ PyInit__cbson(void) if (c_api_object == NULL) INITERROR; - m = PyModule_Create(&moduledef); - if (m == NULL) { - Py_DECREF(c_api_object); - INITERROR; - } - /* Import several python objects */ if (_load_python_objects(m)) { Py_DECREF(c_api_object); @@ -3292,5 +3133,32 @@ PyInit__cbson(void) INITERROR; } - return m; + return 0; +} + +static PyModuleDef_Slot _cbson_slots[] = { + {Py_mod_exec, _cbson_exec}, +#if defined(Py_MOD_MULTIPLE_INTERPRETERS_SUPPORTED) + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_SUPPORTED}, +#endif + {0, NULL}, +}; + + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "_cbson", + NULL, + sizeof(struct module_state), + _CBSONMethods, + _cbson_slots, + _cbson_traverse, + _cbson_clear, + NULL +}; + +PyMODINIT_FUNC +PyInit__cbson(void) +{ + return PyModuleDef_Init(&moduledef); } diff --git a/bson/binary.py b/bson/binary.py index a4cd44e930..5fe1bacd16 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -211,10 +211,9 @@ class Binary(bytes): .. note:: Instances of Binary with subtype 0 will be decoded directly to :class:`bytes`. - :Parameters: - - `data`: the binary data to represent. Can be any bytes-like type + :param data: the binary data to represent. Can be any bytes-like type that implements the buffer protocol. - - `subtype` (optional): the `binary subtype + :param subtype: the `binary subtype `_ to use @@ -253,9 +252,8 @@ def from_uuid( Raises :exc:`TypeError` if `uuid` is not an instance of :class:`~uuid.UUID`. - :Parameters: - - `uuid`: A :class:`uuid.UUID` instance. - - `uuid_representation`: A member of + :param uuid: A :class:`uuid.UUID` instance. + :param uuid_representation: A member of :class:`~bson.binary.UuidRepresentation`. 
Default: :const:`~bson.binary.UuidRepresentation.STANDARD`. See :ref:`handling-uuid-data-example` for details. @@ -304,8 +302,7 @@ def as_uuid(self, uuid_representation: int = UuidRepresentation.STANDARD) -> UUI Raises :exc:`ValueError` if this :class:`~bson.binary.Binary` instance does not contain a UUID. - :Parameters: - - `uuid_representation`: A member of + :param uuid_representation: A member of :class:`~bson.binary.UuidRepresentation`. Default: :const:`~bson.binary.UuidRepresentation.STANDARD`. See :ref:`handling-uuid-data-example` for details. @@ -367,4 +364,7 @@ def __ne__(self, other: Any) -> bool: return not self == other def __repr__(self) -> str: - return f"Binary({bytes.__repr__(self)}, {self.__subtype})" + if self.__subtype == SENSITIVE_SUBTYPE: + return f"" + else: + return f"Binary({bytes.__repr__(self)}, {self.__subtype})" diff --git a/bson/code.py b/bson/code.py index 689cda4acd..6b4541d0ff 100644 --- a/bson/code.py +++ b/bson/code.py @@ -31,15 +31,14 @@ class Code(str): keyword argument it will override any setting for that variable in the `scope` dictionary. - :Parameters: - - `code`: A string containing JavaScript code to be evaluated or another + :param code: A string containing JavaScript code to be evaluated or another instance of Code. In the latter case, the scope of `code` becomes this Code's :attr:`scope`. - - `scope` (optional): dictionary representing the scope in which + :param scope: dictionary representing the scope in which `code` should be evaluated - a mapping from identifiers (as strings) to values. Defaults to ``None``. This is applied after any scope associated with a given `code` above. - - `**kwargs` (optional): scope variables can also be passed as + :param kwargs: scope variables can also be passed as keyword arguments. These are applied after `scope` and `code`. .. 
versionchanged:: 3.4 diff --git a/bson/codec_options.py b/bson/codec_options.py index 2c64c64600..3a0b83b7be 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -120,14 +120,13 @@ class TypeRegistry: See :ref:`custom-type-type-registry` documentation for an example. - :Parameters: - - `type_codecs` (optional): iterable of type codec instances. If + :param type_codecs: iterable of type codec instances. If ``type_codecs`` contains multiple codecs that transform a single python or BSON type, the transformation specified by the type codec occurring last prevails. A TypeError will be raised if one or more type codecs modify the encoding behavior of a built-in :mod:`bson` type. - - `fallback_encoder` (optional): callable that accepts a single, + :param fallback_encoder: callable that accepts a single, unencodable python value and transforms it into a type that :mod:`bson` can encode. See :ref:`fallback-encoder-callable` documentation for an example. @@ -324,30 +323,29 @@ def __init__(self, *args, **kwargs): See :doc:`/examples/uuid` for examples using the `uuid_representation` option. - :Parameters: - - `document_class`: BSON documents returned in queries will be decoded + :param document_class: BSON documents returned in queries will be decoded to an instance of this class. Must be a subclass of :class:`~collections.abc.MutableMapping`. Defaults to :class:`dict`. - - `tz_aware`: If ``True``, BSON datetimes will be decoded to timezone + :param tz_aware: If ``True``, BSON datetimes will be decoded to timezone aware instances of :class:`~datetime.datetime`. Otherwise they will be naive. Defaults to ``False``. - - `uuid_representation`: The BSON representation to use when encoding + :param uuid_representation: The BSON representation to use when encoding and decoding instances of :class:`~uuid.UUID`. Defaults to :data:`~bson.binary.UuidRepresentation.UNSPECIFIED`. 
New applications should consider setting this to :data:`~bson.binary.UuidRepresentation.STANDARD` for cross language compatibility. See :ref:`handling-uuid-data-example` for details. - - `unicode_decode_error_handler`: The error handler to apply when + :param unicode_decode_error_handler: The error handler to apply when a Unicode-related error occurs during BSON decoding that would otherwise raise :exc:`UnicodeDecodeError`. Valid options include 'strict', 'replace', 'backslashreplace', 'surrogateescape', and 'ignore'. Defaults to 'strict'. - - `tzinfo`: A :class:`~datetime.tzinfo` subclass that specifies the + :param tzinfo: A :class:`~datetime.tzinfo` subclass that specifies the timezone to/from which :class:`~datetime.datetime` objects should be encoded/decoded. - - `type_registry`: Instance of :class:`TypeRegistry` used to customize + :param type_registry: Instance of :class:`TypeRegistry` used to customize encoding and decoding behavior. - - `datetime_conversion`: Specifies how UTC datetimes should be decoded + :param datetime_conversion: Specifies how UTC datetimes should be decoded within BSON. Valid options include 'datetime_ms' to return as a DatetimeMS, 'datetime' to return as a datetime.datetime and raising a ValueError for out-of-range values, 'datetime_auto' to diff --git a/bson/datetime_ms.py b/bson/datetime_ms.py index b6aebd05d0..112871a16c 100644 --- a/bson/datetime_ms.py +++ b/bson/datetime_ms.py @@ -51,12 +51,11 @@ def __init__(self, value: Union[int, datetime.datetime]): encoding/decoding BSON. To decode UTC datetimes as a ``DatetimeMS``, `datetime_conversion` in - :class:`~bson.CodecOptions` must be set to 'datetime_ms' or + :class:`~bson.codec_options.CodecOptions` must be set to 'datetime_ms' or 'datetime_auto'. See :ref:`handling-out-of-range-datetimes` for details. 
- :Parameters: - - `value`: An instance of :class:`datetime.datetime` to be + :param value: An instance of :class:`datetime.datetime` to be represented as milliseconds since the Unix epoch, or int of milliseconds since the Unix epoch. """ @@ -104,8 +103,7 @@ def as_datetime( ) -> datetime.datetime: """Create a Python :class:`~datetime.datetime` from this DatetimeMS object. - :Parameters: - - `codec_options`: A CodecOptions instance for specifying how the + :param codec_options: A CodecOptions instance for specifying how the resulting DatetimeMS object will be formatted using ``tz_aware`` and ``tz_info``. Defaults to :const:`~bson.codec_options.DEFAULT_CODEC_OPTIONS`. diff --git a/bson/dbref.py b/bson/dbref.py index 50fcf6c02f..6c21b8162c 100644 --- a/bson/dbref.py +++ b/bson/dbref.py @@ -47,11 +47,10 @@ def __init__( keyword arguments will create additional fields in the resultant embedded document. - :Parameters: - - `collection`: name of the collection the document is stored in - - `id`: the value of the document's ``"_id"`` field - - `database` (optional): name of the database to reference - - `**kwargs` (optional): additional keyword arguments will + :param collection: name of the collection the document is stored in + :param id: the value of the document's ``"_id"`` field + :param database: name of the database to reference + :param kwargs: additional keyword arguments will create additional, custom fields .. seealso:: The MongoDB documentation on `dbrefs `_. diff --git a/bson/decimal128.py b/bson/decimal128.py index f807452a6c..8581d5a3c8 100644 --- a/bson/decimal128.py +++ b/bson/decimal128.py @@ -70,8 +70,7 @@ def create_decimal128_context() -> decimal.Context: def _decimal_to_128(value: _VALUE_OPTIONS) -> Tuple[int, int]: """Converts a decimal.Decimal to BID (high bits, low bits). 
- :Parameters: - - `value`: An instance of decimal.Decimal + :param value: An instance of decimal.Decimal """ with decimal.localcontext(_DEC128_CTX) as ctx: value = ctx.create_decimal(value) @@ -126,8 +125,7 @@ class Decimal128: >>> Decimal128((3474527112516337664, 5)) Decimal128('0.0005') - :Parameters: - - `value`: An instance of :class:`decimal.Decimal`, string, or tuple of + :param value: An instance of :class:`decimal.Decimal`, string, or tuple of (high bits, low bits) from Binary Integer Decimal (BID) format. .. note:: :class:`~Decimal128` uses an instance of :class:`decimal.Context` @@ -275,8 +273,7 @@ def from_bid(cls: Type[Decimal128], value: bytes) -> Decimal128: """Create an instance of :class:`Decimal128` from Binary Integer Decimal string. - :Parameters: - - `value`: 16 byte string (128-bit IEEE 754-2008 decimal floating + :param value: 16 byte string (128-bit IEEE 754-2008 decimal floating point in Binary Integer Decimal (BID) format). """ if not isinstance(value, bytes): diff --git a/bson/int64.py b/bson/int64.py index c0676839ab..5846504a2d 100644 --- a/bson/int64.py +++ b/bson/int64.py @@ -25,8 +25,7 @@ class Int64(int): Python 3. Small integral numbers are encoded to BSON int32 by default, but Int64 numbers will always be encoded to BSON int64. 
- :Parameters: - - `value`: the numeric value to represent + :param value: the numeric value to represent """ __slots__ = () diff --git a/bson/json_util.py b/bson/json_util.py index 1a74a81368..6c5197c75a 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -110,6 +110,7 @@ from typing import ( TYPE_CHECKING, Any, + Callable, Mapping, MutableMapping, Optional, @@ -137,7 +138,7 @@ from bson.min_key import MinKey from bson.objectid import ObjectId from bson.regex import Regex -from bson.son import RE_TYPE, SON +from bson.son import RE_TYPE from bson.timestamp import Timestamp from bson.tz_util import utc @@ -235,6 +236,8 @@ class JSONMode: else: _BASE_CLASS = CodecOptions +_INT32_MAX = 2**31 + class JSONOptions(_BASE_CLASS): json_mode: int @@ -246,33 +249,32 @@ class JSONOptions(_BASE_CLASS): def __init__(self, *args: Any, **kwargs: Any): """Encapsulates JSON options for :func:`dumps` and :func:`loads`. - :Parameters: - - `strict_number_long`: If ``True``, :class:`~bson.int64.Int64` objects + :param strict_number_long: If ``True``, :class:`~bson.int64.Int64` objects are encoded to MongoDB Extended JSON's *Strict mode* type `NumberLong`, ie ``'{"$numberLong": "" }'``. Otherwise they will be encoded as an `int`. Defaults to ``False``. - - `datetime_representation`: The representation to use when encoding + :param datetime_representation: The representation to use when encoding instances of :class:`datetime.datetime`. Defaults to :const:`~DatetimeRepresentation.LEGACY`. - - `strict_uuid`: If ``True``, :class:`uuid.UUID` object are encoded to + :param strict_uuid: If ``True``, :class:`uuid.UUID` object are encoded to MongoDB Extended JSON's *Strict mode* type `Binary`. Otherwise it will be encoded as ``'{"$uuid": "" }'``. Defaults to ``False``. - - `json_mode`: The :class:`JSONMode` to use when encoding BSON types to + :param json_mode: The :class:`JSONMode` to use when encoding BSON types to Extended JSON. Defaults to :const:`~JSONMode.LEGACY`. 
- - `document_class`: BSON documents returned by :func:`loads` will be + :param document_class: BSON documents returned by :func:`loads` will be decoded to an instance of this class. Must be a subclass of :class:`collections.MutableMapping`. Defaults to :class:`dict`. - - `uuid_representation`: The :class:`~bson.binary.UuidRepresentation` + :param uuid_representation: The :class:`~bson.binary.UuidRepresentation` to use when encoding and decoding instances of :class:`uuid.UUID`. Defaults to :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. - - `tz_aware`: If ``True``, MongoDB Extended JSON's *Strict mode* type + :param tz_aware: If ``True``, MongoDB Extended JSON's *Strict mode* type `Date` will be decoded to timezone aware instances of :class:`datetime.datetime`. Otherwise they will be naive. Defaults to ``False``. - - `tzinfo`: A :class:`datetime.tzinfo` subclass that specifies the + :param tzinfo: A :class:`datetime.tzinfo` subclass that specifies the timezone from which :class:`~datetime.datetime` objects should be decoded. Defaults to :const:`~bson.tz_util.utc`. - - `datetime_conversion`: Specifies how UTC datetimes should be decoded + :param datetime_conversion: Specifies how UTC datetimes should be decoded within BSON. Valid options include 'datetime_ms' to return as a DatetimeMS, 'datetime' to return as a datetime.datetime and raising a ValueError for out-of-range values, 'datetime_auto' to @@ -280,8 +282,8 @@ def __init__(self, *args: Any, **kwargs: Any): out-of-range and 'datetime_clamp' to clamp to the minimum and maximum possible datetimes. Defaults to 'datetime'. See :ref:`handling-out-of-range-datetimes` for details. - - `args`: arguments to :class:`~bson.codec_options.CodecOptions` - - `kwargs`: arguments to :class:`~bson.codec_options.CodecOptions` + :param args: arguments to :class:`~bson.codec_options.CodecOptions` + :param kwargs: arguments to :class:`~bson.codec_options.CodecOptions` .. 
seealso:: The specification for Relaxed and Canonical `Extended JSON`_. @@ -456,8 +458,7 @@ def dumps(obj: Any, *args: Any, **kwargs: Any) -> str: Recursive function that handles all BSON types including :class:`~bson.binary.Binary` and :class:`~bson.code.Code`. - :Parameters: - - `json_options`: A :class:`JSONOptions` instance used to modify the + :param json_options: A :class:`JSONOptions` instance used to modify the encoding of MongoDB Extended JSON types. Defaults to :const:`DEFAULT_JSON_OPTIONS`. @@ -480,8 +481,7 @@ def loads(s: Union[str, bytes, bytearray], *args: Any, **kwargs: Any) -> Any: Raises ``TypeError``, ``ValueError``, ``KeyError``, or :exc:`~bson.errors.InvalidId` on invalid MongoDB Extended JSON. - :Parameters: - - `json_options`: A :class:`JSONOptions` instance used to modify the + :param json_options: A :class:`JSONOptions` instance used to modify the decoding of MongoDB Extended JSON types. Defaults to :const:`DEFAULT_JSON_OPTIONS`. @@ -499,7 +499,11 @@ def loads(s: Union[str, bytes, bytearray], *args: Any, **kwargs: Any) -> Any: Accepts optional parameter `json_options`. See :class:`JSONOptions`. """ json_options = kwargs.pop("json_options", DEFAULT_JSON_OPTIONS) - kwargs["object_pairs_hook"] = lambda pairs: object_pairs_hook(pairs, json_options) + # Execution time optimization if json_options.document_class is dict + if json_options.document_class is dict: + kwargs["object_hook"] = lambda obj: object_hook(obj, json_options) + else: + kwargs["object_pairs_hook"] = lambda pairs: object_pairs_hook(pairs, json_options) return json.loads(s, *args, **kwargs) @@ -508,7 +512,7 @@ def _json_convert(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> converted into json. 
""" if hasattr(obj, "items"): - return SON(((k, _json_convert(v, json_options)) for k, v in obj.items())) + return {k: _json_convert(v, json_options) for k, v in obj.items()} elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes)): return [_json_convert(v, json_options) for v in obj] try: @@ -524,54 +528,17 @@ def object_pairs_hook( def object_hook(dct: Mapping[str, Any], json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: - if "$oid" in dct: - return _parse_canonical_oid(dct) - if ( - isinstance(dct.get("$ref"), str) - and "$id" in dct - and isinstance(dct.get("$db"), (str, type(None))) - ): - return _parse_canonical_dbref(dct) - if "$date" in dct: - return _parse_canonical_datetime(dct, json_options) - if "$regex" in dct: - return _parse_legacy_regex(dct) - if "$minKey" in dct: - return _parse_canonical_minkey(dct) - if "$maxKey" in dct: - return _parse_canonical_maxkey(dct) - if "$binary" in dct: - if "$type" in dct: - return _parse_legacy_binary(dct, json_options) - else: - return _parse_canonical_binary(dct, json_options) - if "$code" in dct: - return _parse_canonical_code(dct) - if "$uuid" in dct: - return _parse_legacy_uuid(dct, json_options) - if "$undefined" in dct: - return None - if "$numberLong" in dct: - return _parse_canonical_int64(dct) - if "$timestamp" in dct: - tsp = dct["$timestamp"] - return Timestamp(tsp["t"], tsp["i"]) - if "$numberDecimal" in dct: - return _parse_canonical_decimal128(dct) - if "$dbPointer" in dct: - return _parse_canonical_dbpointer(dct) - if "$regularExpression" in dct: - return _parse_canonical_regex(dct) - if "$symbol" in dct: - return _parse_canonical_symbol(dct) - if "$numberInt" in dct: - return _parse_canonical_int32(dct) - if "$numberDouble" in dct: - return _parse_canonical_double(dct) + match = None + for k in dct: + if k in _PARSERS_SET: + match = k + break + if match: + return _PARSERS[match](dct, json_options) return dct -def _parse_legacy_regex(doc: Any) -> Any: +def _parse_legacy_regex(doc: 
Any, dummy0: Any) -> Any: pattern = doc["$regex"] # Check if this is the $regex query operator. if not isinstance(pattern, (str, bytes)): @@ -707,14 +674,14 @@ def _parse_canonical_datetime( return _millis_to_datetime(int(dtm), cast("CodecOptions[Any]", json_options)) -def _parse_canonical_oid(doc: Any) -> ObjectId: +def _parse_canonical_oid(doc: Any, dummy0: Any) -> ObjectId: """Decode a JSON ObjectId to bson.objectid.ObjectId.""" if len(doc) != 1: raise TypeError(f"Bad $oid, extra field(s): {doc}") return ObjectId(doc["$oid"]) -def _parse_canonical_symbol(doc: Any) -> str: +def _parse_canonical_symbol(doc: Any, dummy0: Any) -> str: """Decode a JSON symbol to Python string.""" symbol = doc["$symbol"] if len(doc) != 1: @@ -722,7 +689,7 @@ def _parse_canonical_symbol(doc: Any) -> str: return str(symbol) -def _parse_canonical_code(doc: Any) -> Code: +def _parse_canonical_code(doc: Any, dummy0: Any) -> Code: """Decode a JSON code to bson.code.Code.""" for key in doc: if key not in ("$code", "$scope"): @@ -730,7 +697,7 @@ def _parse_canonical_code(doc: Any) -> Code: return Code(doc["$code"], scope=doc.get("$scope")) -def _parse_canonical_regex(doc: Any) -> Regex[str]: +def _parse_canonical_regex(doc: Any, dummy0: Any) -> Regex[str]: """Decode a JSON regex to bson.regex.Regex.""" regex = doc["$regularExpression"] if len(doc) != 1: @@ -747,12 +714,18 @@ def _parse_canonical_regex(doc: Any) -> Regex[str]: return Regex(regex["pattern"], opts) -def _parse_canonical_dbref(doc: Any) -> DBRef: +def _parse_canonical_dbref(doc: Any, dummy0: Any) -> Any: """Decode a JSON DBRef to bson.dbref.DBRef.""" - return DBRef(doc.pop("$ref"), doc.pop("$id"), database=doc.pop("$db", None), **doc) + if ( + isinstance(doc.get("$ref"), str) + and "$id" in doc + and isinstance(doc.get("$db"), (str, type(None))) + ): + return DBRef(doc.pop("$ref"), doc.pop("$id"), database=doc.pop("$db", None), **doc) + return doc -def _parse_canonical_dbpointer(doc: Any) -> Any: +def 
_parse_canonical_dbpointer(doc: Any, dummy0: Any) -> Any: """Decode a JSON (deprecated) DBPointer to bson.dbref.DBRef.""" dbref = doc["$dbPointer"] if len(doc) != 1: @@ -771,7 +744,7 @@ def _parse_canonical_dbpointer(doc: Any) -> Any: raise TypeError(f"Bad $dbPointer, expected a DBRef: {doc}") -def _parse_canonical_int32(doc: Any) -> int: +def _parse_canonical_int32(doc: Any, dummy0: Any) -> int: """Decode a JSON int32 to python int.""" i_str = doc["$numberInt"] if len(doc) != 1: @@ -781,7 +754,7 @@ def _parse_canonical_int32(doc: Any) -> int: return int(i_str) -def _parse_canonical_int64(doc: Any) -> Int64: +def _parse_canonical_int64(doc: Any, dummy0: Any) -> Int64: """Decode a JSON int64 to bson.int64.Int64.""" l_str = doc["$numberLong"] if len(doc) != 1: @@ -789,7 +762,7 @@ def _parse_canonical_int64(doc: Any) -> Int64: return Int64(l_str) -def _parse_canonical_double(doc: Any) -> float: +def _parse_canonical_double(doc: Any, dummy0: Any) -> float: """Decode a JSON double to python float.""" d_str = doc["$numberDouble"] if len(doc) != 1: @@ -799,7 +772,7 @@ def _parse_canonical_double(doc: Any) -> float: return float(d_str) -def _parse_canonical_decimal128(doc: Any) -> Decimal128: +def _parse_canonical_decimal128(doc: Any, dummy0: Any) -> Decimal128: """Decode a JSON decimal128 to bson.decimal128.Decimal128.""" d_str = doc["$numberDecimal"] if len(doc) != 1: @@ -809,7 +782,7 @@ def _parse_canonical_decimal128(doc: Any) -> Decimal128: return Decimal128(d_str) -def _parse_canonical_minkey(doc: Any) -> MinKey: +def _parse_canonical_minkey(doc: Any, dummy0: Any) -> MinKey: """Decode a JSON MinKey to bson.min_key.MinKey.""" if type(doc["$minKey"]) is not int or doc["$minKey"] != 1: # noqa: E721 raise TypeError(f"$minKey value must be 1: {doc}") @@ -818,7 +791,7 @@ def _parse_canonical_minkey(doc: Any) -> MinKey: return MinKey() -def _parse_canonical_maxkey(doc: Any) -> MaxKey: +def _parse_canonical_maxkey(doc: Any, dummy0: Any) -> MaxKey: """Decode a JSON MaxKey to 
bson.max_key.MaxKey.""" if type(doc["$maxKey"]) is not int or doc["$maxKey"] != 1: # noqa: E721 raise TypeError("$maxKey value must be 1: %s", (doc,)) @@ -827,103 +800,109 @@ def _parse_canonical_maxkey(doc: Any) -> MaxKey: return MaxKey() +def _parse_binary(doc: Any, json_options: JSONOptions) -> Union[Binary, uuid.UUID]: + if "$type" in doc: + return _parse_legacy_binary(doc, json_options) + else: + return _parse_canonical_binary(doc, json_options) + + +def _parse_timestamp(doc: Any, dummy0: Any) -> Timestamp: + tsp = doc["$timestamp"] + return Timestamp(tsp["t"], tsp["i"]) + + +_PARSERS: dict[str, Callable[[Any, JSONOptions], Any]] = { + "$oid": _parse_canonical_oid, + "$ref": _parse_canonical_dbref, + "$date": _parse_canonical_datetime, + "$regex": _parse_legacy_regex, + "$minKey": _parse_canonical_minkey, + "$maxKey": _parse_canonical_maxkey, + "$binary": _parse_binary, + "$code": _parse_canonical_code, + "$uuid": _parse_legacy_uuid, + "$undefined": lambda _, _1: None, + "$numberLong": _parse_canonical_int64, + "$timestamp": _parse_timestamp, + "$numberDecimal": _parse_canonical_decimal128, + "$dbPointer": _parse_canonical_dbpointer, + "$regularExpression": _parse_canonical_regex, + "$symbol": _parse_canonical_symbol, + "$numberInt": _parse_canonical_int32, + "$numberDouble": _parse_canonical_double, +} +_PARSERS_SET = set(_PARSERS) + + def _encode_binary(data: bytes, subtype: int, json_options: JSONOptions) -> Any: if json_options.json_mode == JSONMode.LEGACY: - return SON([("$binary", base64.b64encode(data).decode()), ("$type", "%02x" % subtype)]) - return { - "$binary": SON([("base64", base64.b64encode(data).decode()), ("subType", "%02x" % subtype)]) - } + return {"$binary": base64.b64encode(data).decode(), "$type": "%02x" % subtype} + return {"$binary": {"base64": base64.b64encode(data).decode(), "subType": "%02x" % subtype}} -def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: - # We preserve key order when rendering SON, 
DBRef, etc. as JSON by - # returning a SON for those types instead of a dict. - if isinstance(obj, ObjectId): - return {"$oid": str(obj)} - if isinstance(obj, DBRef): - return _json_convert(obj.as_doc(), json_options=json_options) - if isinstance(obj, datetime.datetime): - if json_options.datetime_representation == DatetimeRepresentation.ISO8601: - if not obj.tzinfo: - obj = obj.replace(tzinfo=utc) - assert obj.tzinfo is not None - if obj >= EPOCH_AWARE: - off = obj.tzinfo.utcoffset(obj) - if (off.days, off.seconds, off.microseconds) == (0, 0, 0): # type: ignore - tz_string = "Z" - else: - tz_string = obj.strftime("%z") - millis = int(obj.microsecond / 1000) - fracsecs = ".%03d" % (millis,) if millis else "" - return { - "$date": "{}{}{}".format(obj.strftime("%Y-%m-%dT%H:%M:%S"), fracsecs, tz_string) - } - - millis = _datetime_to_millis(obj) - if json_options.datetime_representation == DatetimeRepresentation.LEGACY: - return {"$date": millis} - return {"$date": {"$numberLong": str(millis)}} - if isinstance(obj, DatetimeMS): - if ( - json_options.datetime_representation == DatetimeRepresentation.ISO8601 - and 0 <= int(obj) <= _max_datetime_ms() - ): - return default(obj.as_datetime(), json_options) - elif json_options.datetime_representation == DatetimeRepresentation.LEGACY: - return {"$date": str(int(obj))} - return {"$date": {"$numberLong": str(int(obj))}} - if json_options.strict_number_long and isinstance(obj, Int64): +def _encode_datetimems(obj: Any, json_options: JSONOptions) -> dict: + if ( + json_options.datetime_representation == DatetimeRepresentation.ISO8601 + and 0 <= int(obj) <= _max_datetime_ms() + ): + return _encode_datetime(obj.as_datetime(), json_options) + elif json_options.datetime_representation == DatetimeRepresentation.LEGACY: + return {"$date": str(int(obj))} + return {"$date": {"$numberLong": str(int(obj))}} + + +def _encode_code(obj: Code, json_options: JSONOptions) -> dict: + if obj.scope is None: + return {"$code": str(obj)} + else: + 
return {"$code": str(obj), "$scope": _json_convert(obj.scope, json_options)} + + +def _encode_int64(obj: Int64, json_options: JSONOptions) -> Any: + if json_options.strict_number_long: return {"$numberLong": str(obj)} - if isinstance(obj, (RE_TYPE, Regex)): - flags = "" - if obj.flags & re.IGNORECASE: - flags += "i" - if obj.flags & re.LOCALE: - flags += "l" - if obj.flags & re.MULTILINE: - flags += "m" - if obj.flags & re.DOTALL: - flags += "s" - if obj.flags & re.UNICODE: - flags += "u" - if obj.flags & re.VERBOSE: - flags += "x" - if isinstance(obj.pattern, str): - pattern = obj.pattern - else: - pattern = obj.pattern.decode("utf-8") - if json_options.json_mode == JSONMode.LEGACY: - return SON([("$regex", pattern), ("$options", flags)]) - return {"$regularExpression": SON([("pattern", pattern), ("options", flags)])} - if isinstance(obj, MinKey): - return {"$minKey": 1} - if isinstance(obj, MaxKey): - return {"$maxKey": 1} - if isinstance(obj, Timestamp): - return {"$timestamp": SON([("t", obj.time), ("i", obj.inc)])} - if isinstance(obj, Code): - if obj.scope is None: - return {"$code": str(obj)} - return SON([("$code", str(obj)), ("$scope", _json_convert(obj.scope, json_options))]) - if isinstance(obj, Binary): - return _encode_binary(obj, obj.subtype, json_options) - if isinstance(obj, bytes): - return _encode_binary(obj, 0, json_options) - if isinstance(obj, uuid.UUID): - if json_options.strict_uuid: - binval = Binary.from_uuid(obj, uuid_representation=json_options.uuid_representation) - return _encode_binary(binval, binval.subtype, json_options) - else: - return {"$uuid": obj.hex} - if isinstance(obj, Decimal128): - return {"$numberDecimal": str(obj)} - if isinstance(obj, bool): - return obj - if json_options.json_mode == JSONMode.CANONICAL and isinstance(obj, int): - if -(2**31) <= obj < 2**31: + else: + return int(obj) + + +def _encode_noop(obj: Any, dummy0: Any) -> Any: + return obj + + +def _encode_regex(obj: Any, json_options: JSONOptions) -> dict: + 
flags = "" + if obj.flags & re.IGNORECASE: + flags += "i" + if obj.flags & re.LOCALE: + flags += "l" + if obj.flags & re.MULTILINE: + flags += "m" + if obj.flags & re.DOTALL: + flags += "s" + if obj.flags & re.UNICODE: + flags += "u" + if obj.flags & re.VERBOSE: + flags += "x" + if isinstance(obj.pattern, str): + pattern = obj.pattern + else: + pattern = obj.pattern.decode("utf-8") + if json_options.json_mode == JSONMode.LEGACY: + return {"$regex": pattern, "$options": flags} + return {"$regularExpression": {"pattern": pattern, "options": flags}} + + +def _encode_int(obj: int, json_options: JSONOptions) -> Any: + if json_options.json_mode == JSONMode.CANONICAL: + if -_INT32_MAX <= obj < _INT32_MAX: return {"$numberInt": str(obj)} return {"$numberLong": str(obj)} - if json_options.json_mode != JSONMode.LEGACY and isinstance(obj, float): + return obj + + +def _encode_float(obj: float, json_options: JSONOptions) -> Any: + if json_options.json_mode != JSONMode.LEGACY: if math.isnan(obj): return {"$numberDouble": "NaN"} elif math.isinf(obj): @@ -933,4 +912,250 @@ def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: # repr() will return the shortest string guaranteed to produce the # original value, when float() is called on it. 
return {"$numberDouble": str(repr(obj))} + return obj + + +def _encode_datetime(obj: datetime.datetime, json_options: JSONOptions) -> dict: + if json_options.datetime_representation == DatetimeRepresentation.ISO8601: + if not obj.tzinfo: + obj = obj.replace(tzinfo=utc) + assert obj.tzinfo is not None + if obj >= EPOCH_AWARE: + off = obj.tzinfo.utcoffset(obj) + if (off.days, off.seconds, off.microseconds) == (0, 0, 0): # type: ignore + tz_string = "Z" + else: + tz_string = obj.strftime("%z") + millis = int(obj.microsecond / 1000) + fracsecs = ".%03d" % (millis,) if millis else "" + return { + "$date": "{}{}{}".format(obj.strftime("%Y-%m-%dT%H:%M:%S"), fracsecs, tz_string) + } + + millis = _datetime_to_millis(obj) + if json_options.datetime_representation == DatetimeRepresentation.LEGACY: + return {"$date": millis} + return {"$date": {"$numberLong": str(millis)}} + + +def _encode_bytes(obj: bytes, json_options: JSONOptions) -> dict: + return _encode_binary(obj, 0, json_options) + + +def _encode_binary_obj(obj: Binary, json_options: JSONOptions) -> dict: + return _encode_binary(obj, obj.subtype, json_options) + + +def _encode_uuid(obj: uuid.UUID, json_options: JSONOptions) -> dict: + if json_options.strict_uuid: + binval = Binary.from_uuid(obj, uuid_representation=json_options.uuid_representation) + return _encode_binary(binval, binval.subtype, json_options) + else: + return {"$uuid": obj.hex} + + +def _encode_objectid(obj: ObjectId, dummy0: Any) -> dict: + return {"$oid": str(obj)} + + +def _encode_timestamp(obj: Timestamp, dummy0: Any) -> dict: + return {"$timestamp": {"t": obj.time, "i": obj.inc}} + + +def _encode_decimal128(obj: Timestamp, dummy0: Any) -> dict: + return {"$numberDecimal": str(obj)} + + +def _encode_dbref(obj: DBRef, json_options: JSONOptions) -> dict: + return _json_convert(obj.as_doc(), json_options=json_options) + + +def _encode_minkey(dummy0: Any, dummy1: Any) -> dict: + return {"$minKey": 1} + + +def _encode_maxkey(dummy0: Any, dummy1: Any) -> 
dict: + return {"$maxKey": 1} + + +# Encoders for BSON types +# Each encoder function's signature is: +# - obj: a Python data type, e.g. a Python int for _encode_int +# - json_options: a JSONOptions +_ENCODERS: dict[Type, Callable[[Any, JSONOptions], Any]] = { + bool: _encode_noop, + bytes: _encode_bytes, + datetime.datetime: _encode_datetime, + DatetimeMS: _encode_datetimems, + float: _encode_float, + int: _encode_int, + str: _encode_noop, + type(None): _encode_noop, + uuid.UUID: _encode_uuid, + Binary: _encode_binary_obj, + Int64: _encode_int64, + Code: _encode_code, + DBRef: _encode_dbref, + MaxKey: _encode_maxkey, + MinKey: _encode_minkey, + ObjectId: _encode_objectid, + Regex: _encode_regex, + RE_TYPE: _encode_regex, + Timestamp: _encode_timestamp, + Decimal128: _encode_decimal128, +} + +# Map each _type_marker to its encoder for faster lookup. +_MARKERS: dict[int, Callable[[Any, JSONOptions], Any]] = {} +for _typ in _ENCODERS: + if hasattr(_typ, "_type_marker"): + _MARKERS[_typ._type_marker] = _ENCODERS[_typ] + +_BUILT_IN_TYPES = tuple(t for t in _ENCODERS) + + +def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: + # First see if the type is already cached. KeyError will only ever + # happen once per subtype. + try: + return _ENCODERS[type(obj)](obj, json_options) + except KeyError: + pass + + # Second, fall back to trying _type_marker. This has to be done + # before the loop below since users could subclass one of our + # custom types that subclasses a python built-in (e.g. Binary) + if hasattr(obj, "_type_marker"): + marker = obj._type_marker + if marker in _MARKERS: + func = _MARKERS[marker] + # Cache this type for faster subsequent lookup. + _ENCODERS[type(obj)] = func + return func(obj, json_options) + + # Third, test each base type. This will only happen once for + # a subtype of a supported base type. 
+ for base in _BUILT_IN_TYPES: + if isinstance(obj, base): + func = _ENCODERS[base] + # Cache this type for faster subsequent lookup. + _ENCODERS[type(obj)] = func + return func(obj, json_options) + raise TypeError("%r is not JSON serializable" % obj) + + +def _get_str_size(obj: Any) -> int: + return len(obj) + + +def _get_datetime_size(obj: datetime.datetime) -> int: + return 5 + len(str(obj.time())) + + +def _get_regex_size(obj: Regex) -> int: + return 18 + len(obj.pattern) + + +def _get_dbref_size(obj: DBRef) -> int: + return 34 + len(obj.collection) + + +_CONSTANT_SIZE_TABLE: dict[Any, int] = { + ObjectId: 28, + int: 11, + Int64: 11, + Decimal128: 11, + Timestamp: 14, + MinKey: 8, + MaxKey: 8, +} + +_VARIABLE_SIZE_TABLE: dict[Any, Callable[[Any], int]] = { + str: _get_str_size, + bytes: _get_str_size, + datetime.datetime: _get_datetime_size, + Regex: _get_regex_size, + DBRef: _get_dbref_size, +} + + +def get_size(obj: Any, max_size: int, current_size: int = 0) -> int: + """Recursively finds size of objects""" + if current_size >= max_size: + return current_size + + obj_type = type(obj) + + # Check to see if the obj has a constant size estimate + try: + return _CONSTANT_SIZE_TABLE[obj_type] + except KeyError: + pass + + # Check to see if the obj has a variable but simple size estimate + try: + return _VARIABLE_SIZE_TABLE[obj_type](obj) + except KeyError: + pass + + # Special cases that require recursion + if obj_type == Code: + if obj.scope: + current_size += ( + 5 + get_size(obj.scope, max_size, current_size) + len(obj) - len(obj.scope) + ) + else: + current_size += 5 + len(obj) + elif obj_type == dict: + for k, v in obj.items(): + current_size += get_size(k, max_size, current_size) + current_size += get_size(v, max_size, current_size) + if current_size >= max_size: + return current_size + elif hasattr(obj, "__iter__"): + for i in obj: + current_size += get_size(i, max_size, current_size) + if current_size >= max_size: + return current_size + return 
current_size + + +def _truncate_documents(obj: Any, max_length: int) -> Tuple[Any, int]: + """Recursively truncate documents as needed to fit inside max_length characters.""" + if max_length <= 0: + return None, 0 + remaining = max_length + if hasattr(obj, "items"): + truncated: Any = {} + for k, v in obj.items(): + truncated_v, remaining = _truncate_documents(v, remaining) + if truncated_v: + truncated[k] = truncated_v + if remaining <= 0: + break + return truncated, remaining + elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes)): + truncated: Any = [] # type:ignore[no-redef] + for v in obj: + truncated_v, remaining = _truncate_documents(v, remaining) + if truncated_v: + truncated.append(truncated_v) + if remaining <= 0: + break + return truncated, remaining + else: + return _truncate(obj, remaining) + + +def _truncate(obj: Any, remaining: int) -> Tuple[Any, int]: + size = get_size(obj, remaining) + + if size <= remaining: + return obj, remaining - size + else: + try: + truncated = obj[:remaining] + except TypeError: + truncated = obj + return truncated, remaining - size diff --git a/bson/objectid.py b/bson/objectid.py index 2a3d9ebf5b..57efdc7983 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -84,8 +84,7 @@ def __init__(self, oid: Optional[Union[str, ObjectId, bytes]] = None) -> None: Raises :class:`~bson.errors.InvalidId` if `oid` is not 12 bytes nor 24 hex digits, or :class:`TypeError` if `oid` is not an accepted type. - :Parameters: - - `oid` (optional): a valid ObjectId. + :param oid: a valid ObjectId. .. seealso:: The MongoDB documentation on `ObjectIds `_. 
@@ -126,8 +125,7 @@ def from_datetime(cls: Type[ObjectId], generation_time: datetime.datetime) -> Ob >>> dummy_id = ObjectId.from_datetime(gen_time) >>> result = collection.find({"_id": {"$lt": dummy_id}}) - :Parameters: - - `generation_time`: :class:`~datetime.datetime` to be used + :param generation_time: :class:`~datetime.datetime` to be used as the generation time for the resulting ObjectId. """ offset = generation_time.utcoffset() @@ -141,8 +139,7 @@ def from_datetime(cls: Type[ObjectId], generation_time: datetime.datetime) -> Ob def is_valid(cls: Type[ObjectId], oid: Any) -> bool: """Checks if a `oid` string is valid or not. - :Parameters: - - `oid`: the object id to validate + :param oid: the object id to validate .. versionadded:: 2.3 """ @@ -186,8 +183,7 @@ def __validate(self, oid: Any) -> None: :class:`bytes`, or ObjectId. Raises InvalidId if it is not a valid ObjectId. - :Parameters: - - `oid`: a valid ObjectId + :param oid: a valid ObjectId """ if isinstance(oid, ObjectId): self.__id = oid.binary diff --git a/bson/raw_bson.py b/bson/raw_bson.py index 50362398a3..2ce53143c2 100644 --- a/bson/raw_bson.py +++ b/bson/raw_bson.py @@ -52,29 +52,24 @@ """ from __future__ import annotations -from typing import Any, ItemsView, Iterator, Mapping, MutableMapping, Optional +from typing import Any, ItemsView, Iterator, Mapping, Optional from bson import _get_object_size, _raw_to_dict from bson.codec_options import _RAW_BSON_DOCUMENT_MARKER, CodecOptions from bson.codec_options import DEFAULT_CODEC_OPTIONS as DEFAULT -from bson.son import SON def _inflate_bson( bson_bytes: bytes, codec_options: CodecOptions[RawBSONDocument], raw_array: bool = False -) -> MutableMapping[str, Any]: +) -> dict[str, Any]: """Inflates the top level fields of a BSON document. 
- :Parameters: - - `bson_bytes`: the BSON bytes that compose this document - - `codec_options`: An instance of + :param bson_bytes: the BSON bytes that compose this document + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions` whose ``document_class`` must be :class:`RawBSONDocument`. """ - # Use SON to preserve ordering of elements. - return _raw_to_dict( - bson_bytes, 4, len(bson_bytes) - 1, codec_options, SON(), raw_array=raw_array - ) + return _raw_to_dict(bson_bytes, 4, len(bson_bytes) - 1, codec_options, {}, raw_array=raw_array) class RawBSONDocument(Mapping[str, Any]): @@ -110,9 +105,8 @@ class from the standard library so it can be used like a read-only >>> raw_doc['_id'] 'my_doc' - :Parameters: - - `bson_bytes`: the BSON bytes that compose this document - - `codec_options` (optional): An instance of + :param bson_bytes: the BSON bytes that compose this document + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions` whose ``document_class`` must be :class:`RawBSONDocument`. The default is :attr:`DEFAULT_RAW_BSON_OPTIONS`. @@ -154,7 +148,6 @@ def __inflated(self) -> Mapping[str, Any]: if self.__inflated_doc is None: # We already validated the object's size when this document was # created, so no need to do that again. - # Use SON to preserve ordering of elements. self.__inflated_doc = self._inflate_bson(self.__raw, self.__codec_options) return self.__inflated_doc diff --git a/bson/regex.py b/bson/regex.py index e3ca1ab69f..60cff4fd08 100644 --- a/bson/regex.py +++ b/bson/regex.py @@ -66,8 +66,7 @@ def from_native(cls: Type[Regex[Any]], regex: Pattern[_T]) -> Regex[_T]: >>> regex.flags ^= re.UNICODE >>> db.collection.insert_one({'pattern': regex}) - :Parameters: - - `regex`: A regular expression object from ``re.compile()``. + :param regex: A regular expression object from ``re.compile()``. .. 
warning:: Python regular expressions use a different syntax and different @@ -89,9 +88,8 @@ def __init__(self, pattern: _T, flags: Union[str, int] = 0) -> None: This class is useful to store and retrieve regular expressions that are incompatible with Python's regular expression dialect. - :Parameters: - - `pattern`: string - - `flags`: (optional) an integer bitmask, or a string of flag + :param pattern: string + :param flags: an integer bitmask, or a string of flag characters like "im" for IGNORECASE and MULTILINE """ if not isinstance(pattern, (str, bytes)): diff --git a/bson/son.py b/bson/son.py index c5df4e5972..cf62717238 100644 --- a/bson/son.py +++ b/bson/son.py @@ -159,7 +159,9 @@ def update(self, other: Optional[Any] = None, **kwargs: _Value) -> None: # type if kwargs: self.update(kwargs) - def get(self, key: _Key, default: Optional[Union[_Value, _T]] = None) -> Union[_Value, _T, None]: # type: ignore[override] + def get( # type: ignore[override] + self, key: _Key, default: Optional[Union[_Value, _T]] = None + ) -> Union[_Value, _T, None]: try: return self[key] except KeyError: diff --git a/bson/timestamp.py b/bson/timestamp.py index 9bc6a715b6..3e76e7baad 100644 --- a/bson/timestamp.py +++ b/bson/timestamp.py @@ -47,11 +47,10 @@ def __init__(self, time: Union[datetime.datetime, int], inc: int) -> None: an instance of :class:`int`. Raises :class:`ValueError` if `time` or `inc` is not in [0, 2**32). 
- :Parameters: - - `time`: time in seconds since epoch UTC, or a naive UTC + :param time: time in seconds since epoch UTC, or a naive UTC :class:`~datetime.datetime`, or an aware :class:`~datetime.datetime` - - `inc`: the incrementing counter + :param inc: the incrementing counter """ if isinstance(time, datetime.datetime): offset = time.utcoffset() diff --git a/doc/api/pymongo/auth_oidc.rst b/doc/api/pymongo/auth_oidc.rst new file mode 100644 index 0000000000..1466b21e9d --- /dev/null +++ b/doc/api/pymongo/auth_oidc.rst @@ -0,0 +1,5 @@ +:mod:`auth_oidc` -- MONGODB-OIDC Authentication +=========================================================================== + +.. automodule:: pymongo.auth_oidc + :members: diff --git a/doc/api/pymongo/collection.rst b/doc/api/pymongo/collection.rst index a75c0ac586..e3746c68b7 100644 --- a/doc/api/pymongo/collection.rst +++ b/doc/api/pymongo/collection.rst @@ -69,3 +69,5 @@ .. automethod:: drop .. automethod:: rename .. automethod:: options + .. automethod:: __getitem__ + .. automethod:: __getattr__ diff --git a/doc/api/pymongo/database.rst b/doc/api/pymongo/database.rst index b40a77dff3..044e04971e 100644 --- a/doc/api/pymongo/database.rst +++ b/doc/api/pymongo/database.rst @@ -20,6 +20,8 @@ .. note:: Use dictionary style access if `collection_name` is an attribute of the :class:`Database` class eg: db[`collection_name`]. + .. automethod:: __getitem__ + .. automethod:: __getattr__ .. autoattribute:: codec_options .. autoattribute:: read_preference .. autoattribute:: write_concern diff --git a/doc/api/pymongo/index.rst b/doc/api/pymongo/index.rst index 625c138170..2beb8f5745 100644 --- a/doc/api/pymongo/index.rst +++ b/doc/api/pymongo/index.rst @@ -29,6 +29,7 @@ Sub-modules: .. 
toctree:: :maxdepth: 2 + auth_oidc change_stream client_options client_session diff --git a/doc/api/pymongo/mongo_client.rst b/doc/api/pymongo/mongo_client.rst index 83dab27f2c..37ec8ae002 100644 --- a/doc/api/pymongo/mongo_client.rst +++ b/doc/api/pymongo/mongo_client.rst @@ -35,3 +35,5 @@ .. automethod:: get_database .. automethod:: server_info .. automethod:: watch + .. automethod:: __getitem__ + .. automethod:: __getattr__ diff --git a/doc/atlas.rst b/doc/atlas.rst index 6685cf9fb8..19ba9732f2 100644 --- a/doc/atlas.rst +++ b/doc/atlas.rst @@ -35,7 +35,7 @@ Connections to Atlas require TLS/SSL. You can read more about TLS versions and their security implications here: - ``_ + ``_ .. _python.org: https://www.python.org/downloads/ .. _homebrew: https://brew.sh/ diff --git a/doc/changelog.rst b/doc/changelog.rst index e04452e483..1935fda233 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,230 @@ Changelog ========= +Changes in Version 4.8.0 +------------------------- + +.. warning:: PyMongo 4.8 drops support for Python 3.7 and PyPy 3.8: Python 3.8+ or PyPy 3.9+ is now required. + +PyMongo 4.8 brings a number of improvements including: + +- The handshake metadata for "os.name" on Windows has been simplified to "Windows" to improve import time. +- The repr of ``bson.binary.Binary`` is now redacted when the subtype is SENSITIVE_SUBTYPE(8). +- Secure Software Development Life Cycle automation for release process. + GitHub Releases now include a Software Bill of Materials, and signature + files corresponding to the distribution files released on PyPI. +- Fixed a bug in change streams where both ``startAtOperationTime`` and ``resumeToken`` + could be added to a retry attempt, which caused the retry to fail. +- Fallback to stdlib ``ssl`` module when ``pyopenssl`` import fails with AttributeError. +- Improved performance of MongoClient operations, especially when many operations are being run concurrently. 
+ +Unavoidable breaking changes +............................ + +- Since we are now using ``hatch`` as our build backend, we no longer have a usable ``setup.py`` file + and require installation using ``pip``. Attempts to invoke the ``setup.py`` file will raise an exception. + Additionally, ``pip`` >= 21.3 is now required for editable installs. + +Issues Resolved +............... + +See the `PyMongo 4.8 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.8 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=37057 + +Changes in Version 4.7.3 +------------------------- + +Version 4.7.3 has further fixes for lazily loading modules. + +- Use deferred imports instead of importlib lazy module loading. +- Improve import time on Windows. +- Reduce verbosity of "Waiting for suitable server to become available" log message from info to debug. + +Issues Resolved +............... + +See the `PyMongo 4.7.3 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.7.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=39865 + +Changes in Version 4.7.2 +------------------------- + +Version 4.7.2 fixes a bug introduced in 4.7.0: + +- Fixed a bug where PyMongo could not be used with the Nuitka compiler. + +Issues Resolved +............... + +See the `PyMongo 4.7.2 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.7.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=39710 + + +Changes in Version 4.7.1 +------------------------- + +Version 4.7.1 fixes a bug introduced in 4.7.0: + +- Fixed a bug where PyMongo would cause an ``AttributeError`` if ``dns.resolver`` was imported and referenced + after PyMongo was imported. +- Clarified the behavior of the ``TOKEN_RESOURCE`` auth mechanism property for ``MONGODB-OIDC``. 
+ +Issues Resolved +............... + +See the `PyMongo 4.7.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.7.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=39680 + +Changes in Version 4.7 +------------------------ + +PyMongo 4.7 brings a number of improvements including: + +- Added support for ``MONGODB-OIDC`` authentication. The MONGODB-OIDC mechanism authenticates + using an OpenID Connect (OIDC) access token. + The driver supports OIDC for workload identity, defined as an identity you assign to a software workload + (such as an application, service, script, or container) to authenticate and access other services and resources. + Please see :doc:`examples/authentication` for more information. +- Added support for Python's `native logging library `_, + enabling developers to customize the verbosity of log messages for their applications. + Please see :doc:`examples/logging` for more information. +- Significantly improved the performance of encoding BSON documents to JSON. +- Added support for named KMS providers for client side field level encryption. + Previously supported KMS providers were only: aws, azure, gcp, kmip, and local. + The KMS provider is now expanded to support name suffixes (e.g. local:myname). + Named KMS providers enables more than one of each KMS provider type to be configured. + See the docstring for :class:`~pymongo.encryption_options.AutoEncryptionOpts`. + Note that named KMS providers requires pymongocrypt >=1.9 and libmongocrypt >=1.9. +- Added the :class:`pymongo.hello.Hello.connection_id`, + :attr:`pymongo.monitoring.CommandStartedEvent.server_connection_id`, + :attr:`pymongo.monitoring.CommandSucceededEvent.server_connection_id`, and + :attr:`pymongo.monitoring.CommandFailedEvent.server_connection_id` properties. 
+- Fixed a bug where inflating a :class:`~bson.raw_bson.RawBSONDocument` containing a :class:`~bson.code.Code` would cause an error. +- :meth:`~pymongo.encryption.ClientEncryption.encrypt` and + :meth:`~pymongo.encryption.ClientEncryption.encrypt_expression` now allow ``key_id`` + to be passed in as a :class:`uuid.UUID`. +- Fixed a bug where :class:`~bson.int64.Int64` instances could not always be encoded by `orjson`_. The following now + works:: + + >>> import orjson + >>> from bson import json_util + >>> orjson.dumps({'a': Int64(1)}, default=json_util.default, option=orjson.OPT_PASSTHROUGH_SUBCLASS) + +.. _orjson: https://github.com/ijl/orjson + +- Fixed a bug appearing in Python 3.12 where "RuntimeError: can't create new thread at interpreter shutdown" + could be written to stderr when a MongoClient's thread starts as the python interpreter is shutting down. +- Added a warning when connecting to DocumentDB and CosmosDB clusters. + For more information regarding feature compatibility and support please visit + `mongodb.com/supportability/documentdb `_ and + `mongodb.com/supportability/cosmosdb `_. +- Added the :attr:`pymongo.monitoring.ConnectionCheckedOutEvent.duration`, + :attr:`pymongo.monitoring.ConnectionCheckOutFailedEvent.duration`, and + :attr:`pymongo.monitoring.ConnectionReadyEvent.duration` properties. +- Added the ``type`` and ``kwargs`` arguments to :class:`~pymongo.operations.SearchIndexModel` to enable + creating vector search indexes in MongoDB Atlas. +- Fixed a bug where ``read_concern`` and ``write_concern`` were improperly added to + :meth:`~pymongo.collection.Collection.list_search_indexes` queries. +- Deprecated :attr:`pymongo.write_concern.WriteConcern.wtimeout` and :attr:`pymongo.mongo_client.MongoClient.wTimeoutMS`. + Use :meth:`~pymongo.timeout` instead. + +.. warning:: PyMongo depends on ``dnspython``, which released version 2.6.1 with a fix for + `CVE-2023-29483 `_. 
We do not explicitly require + that version, but we strongly recommend that you install at least that version in your environment. + +Unavoidable breaking changes +............................ + +- Replaced usage of :class:`bson.son.SON` on all internal classes and commands to dict, + :attr:`options.pool_options.metadata` is now of type ``dict`` as opposed to :class:`bson.son.SON`. + Here's some examples of how this changes expected output as well as how to convert from :class:`dict` to :class:`bson.son.SON`:: + + # Before + >>> from pymongo import MongoClient + >>> client = MongoClient() + >>> client.options.pool_options.metadata + SON([('driver', SON([('name', 'PyMongo'), ('version', '4.7.0.dev0')])), ('os', SON([('type', 'Darwin'), ('name', 'Darwin'), ('architecture', 'arm64'), ('version', '14.3')])), ('platform', 'CPython 3.11.6.final.0')]) + + # After + >>> client.options.pool_options.metadata + {'driver': {'name': 'PyMongo', 'version': '4.7.0.dev0'}, 'os': {'type': 'Darwin', 'name': 'Darwin', 'architecture': 'arm64', 'version': '14.3'}, 'platform': 'CPython 3.11.6.final.0'} + + # To convert from dict to SON + # This will only convert the first layer of the dictionary + >>> data_as_dict = client.options.pool_options.metadata + >>> SON(data_as_dict) + SON([('driver', {'name': 'PyMongo', 'version': '4.7.0.dev0'}), ('os', {'type': 'Darwin', 'name': 'Darwin', 'architecture': 'arm64', 'version': '14.3'}), ('platform', 'CPython 3.11.6.final.0')]) + + # To convert from dict to SON on a nested dictionary + >>> def dict_to_SON(data_as_dict: dict[Any, Any]): + ... data_as_SON = SON() + ... for key, value in data_as_dict.items(): + ... data_as_SON[key] = dict_to_SON(value) if isinstance(value, dict) else value + ... 
return data_as_SON + >>> + >>> dict_to_SON(data_as_dict) + SON([('driver', SON([('name', 'PyMongo'), ('version', '4.7.0.dev0')])), ('os', SON([('type', 'Darwin'), ('name', 'Darwin'), ('architecture', 'arm64'), ('version', '14.3')])), ('platform', 'CPython 3.11.6.final.0')]) + +- PyMongo now uses `lazy imports `_ for external dependencies. + If you are relying on any kind of monkey-patching of the standard library, you may need to explicitly import those external libraries in addition + to ``pymongo`` before applying the patch. Note that we test with ``gevent`` and ``eventlet`` patching, and those continue to work. + +- The "aws" extra now requires minimum version of ``1.1.0`` for ``pymongo_auth_aws``. + +Changes in Version 4.6.3 +------------------------ + +PyMongo 4.6.3 fixes the following bug: + +- Fixed a potential memory access violation when decoding invalid bson. + +Issues Resolved +............... + +See the `PyMongo 4.6.3 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.6.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=38360 + +Changes in Version 4.6.2 +------------------------ + +PyMongo 4.6.2 fixes the following bug: + +- Fixed a bug appearing in Python 3.12 where "RuntimeError: can't create new thread at interpreter shutdown" + could be written to stderr when a MongoClient's thread starts as the python interpreter is shutting down. + +Issues Resolved +............... + +See the `PyMongo 4.6.2 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.6.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=37906 + +Changes in Version 4.6.1 +------------------------ + +PyMongo 4.6.1 fixes the following bug: + +- Ensure retryable read ``OperationFailure`` errors re-raise exception when 0 or NoneType error code is provided. + +Issues Resolved +............... 
+ +See the `PyMongo 4.6.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.6.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=37138 + Changes in Version 4.6 ---------------------- @@ -33,6 +257,14 @@ PyMongo 4.6 brings a number of improvements including: - Added the :ref:`network-compression-example` documentation page. - Added more timeout information to network errors. +Issues Resolved +............... + +See the `PyMongo 4.6 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.6 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=36542 + Changes in Version 4.5 ---------------------- diff --git a/doc/compatibility-policy.rst b/doc/compatibility-policy.rst index a20b9681eb..834f86ce54 100644 --- a/doc/compatibility-policy.rst +++ b/doc/compatibility-policy.rst @@ -59,4 +59,4 @@ deprecated PyMongo features. .. _the warnings module: https://docs.python.org/3/library/warnings.html -.. _the -W command line option: https://docs.python.org/3/using/cmdline.html#cmdoption-w +.. 
_the -W command line option: https://docs.python.org/3/using/cmdline.html#cmdoption-W diff --git a/doc/conf.py b/doc/conf.py index 1ea51add88..f0d9f921bb 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -94,6 +94,8 @@ # -- Options for extensions ---------------------------------------------------- autoclass_content = "init" +autodoc_typehints = "description" + doctest_path = [Path("..").resolve()] doctest_test_doctest_blocks = "" diff --git a/doc/contributors.rst b/doc/contributors.rst index 2a4ca1ea47..49fb2d844d 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -98,3 +98,6 @@ The following is a list of people who have contributed to - Dainis Gorbunovs (DainisGorbunovs) - Iris Ho (sleepyStick) - Stephan Hof (stephan-hof) +- Casey Clements (caseyclements) +- Ivan Lukyanchikov (ilukyanchikov) +- Terry Patterson diff --git a/doc/docs-requirements.txt b/doc/docs-requirements.txt deleted file mode 100644 index d8fd12b54d..0000000000 --- a/doc/docs-requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -sphinx>=5.3,<7 -sphinx_rtd_theme~=0.5 -readthedocs-sphinx-search~=0.1 -sphinxcontrib-shellcheck~=1.1 -furo==2022.12.7 diff --git a/doc/examples/aggregation.rst b/doc/examples/aggregation.rst index 22e19e9842..9b1a89fba7 100644 --- a/doc/examples/aggregation.rst +++ b/doc/examples/aggregation.rst @@ -45,8 +45,8 @@ To achieve this we need to pass in three operations to the pipeline. First, we need to unwind the ``tags`` array, then group by the tags and sum them up, finally we sort by count. -As python dictionaries don't maintain order you should use :class:`~bson.son.SON` -or :class:`collections.OrderedDict` where explicit ordering is required +Python dictionaries prior to 3.7 don't maintain order. You should use :class:`~bson.son.SON` +or :class:`collections.OrderedDict` where explicit ordering is required for an older Python version eg "$sort": .. 
note:: diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index b3ba89026e..24b3cff8df 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -384,3 +384,165 @@ would be:: .. _Assume Role: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html .. _EC2 instance: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html .. _environment variables: https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-runtime + +MONGODB-OIDC +------------ +.. versionadded:: 4.7 + +The `MONGODB-OIDC authentication mechanism`_ is available in MongoDB 7.0+ on Linux platforms. + +The MONGODB-OIDC mechanism authenticates using an OpenID Connect (OIDC) access token. +The driver supports OIDC for workload identity, defined as an identity you assign to a software workload +(such as an application, service, script, or container) to authenticate and access other services and resources. + +Credentials can be configured through the MongoDB URI or as arguments to +:class:`~pymongo.mongo_client.MongoClient`. + +Built-in Support +~~~~~~~~~~~~~~~~ + +The driver has built-in support for Azure IMDS and GCP IMDS environments. Other environments +are supported with `Custom Callbacks`_. + +Azure IMDS +^^^^^^^^^^ + +For an application running on an Azure VM or otherwise using the `Azure Internal Metadata Service`_, +you can use the built-in support for Azure. If using an Azure managed identity, the "" is +the client ID. If using a service principal to represent an enterprise application, the "" is +the application ID of the service principal. The ```` value is the ``audience`` +`configured on your MongoDB deployment`_. + +.. 
code-block:: python + + import os + + uri = os.environ["MONGODB_URI"] + + props = {"ENVIRONMENT": "azure", "TOKEN_RESOURCE": "<audience>"} + c = MongoClient( + uri, + username="<username>", + authMechanism="MONGODB-OIDC", + authMechanismProperties=props, + ) + c.test.test.insert_one({}) + c.close() + +If the application is running on an Azure VM and only one managed identity is associated with the +VM, ``username`` can be omitted. + +If providing the ``TOKEN_RESOURCE`` as part of a connection string, it can be given as follows. +If the ``TOKEN_RESOURCE`` contains any of the following characters [``,``, ``+``, ``&``], then +it MUST be url-encoded. + +.. code-block:: python + + import os + + uri = f'{os.environ["MONGODB_URI"]}?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:<percent-encoded audience>' + c = MongoClient(uri) + c.test.test.insert_one({}) + c.close() + +GCP IMDS +^^^^^^^^ + +For an application running on a GCP VM or otherwise using the `GCP Internal Metadata Service`_, +you can use the built-in support for GCP, where ``<audience>`` below is the ``audience`` +`configured on your MongoDB deployment`_. + +.. code-block:: python + + import os + + uri = os.environ["MONGODB_URI"] + + props = {"ENVIRONMENT": "gcp", "TOKEN_RESOURCE": "<audience>"} + c = MongoClient(uri, authMechanism="MONGODB-OIDC", authMechanismProperties=props) + c.test.test.insert_one({}) + c.close() + +If providing the ``TOKEN_RESOURCE`` as part of a connection string, it can be given as follows. +If the ``TOKEN_RESOURCE`` contains any of the following characters [``,``, ``+``, ``&``], then +it MUST be url-encoded. + +.. code-block:: python + + import os + + uri = f'{os.environ["MONGODB_URI"]}?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:gcp,TOKEN_RESOURCE:<percent-encoded audience>' + c = MongoClient(uri) + c.test.test.insert_one({}) + c.close() + +Custom Callbacks +~~~~~~~~~~~~~~~~ + +For environments that are not directly supported by the driver, you can use :class:`~pymongo.auth_oidc.OIDCCallback`. 
+Some examples are given below. + +Other Azure Environments +^^^^^^^^^^^^^^^^^^^^^^^^ + +For applications running on Azure Functions, App Service Environment (ASE), or +Azure Kubernetes Service (AKS), you can use the `azure-identity package`_ +to fetch the credentials. This example assumes you have set environment variables for +the ``audience`` `configured on your MongoDB deployment`_, and for the client id of the Azure +managed identity. + +.. code-block:: python + + import os + from azure.identity import DefaultAzureCredential + from pymongo import MongoClient + from pymongo.auth_oidc import OIDCCallback, OIDCCallbackContext, OIDCCallbackResult + + audience = os.environ["AZURE_AUDIENCE"] + client_id = os.environ["AZURE_IDENTITY_CLIENT_ID"] + uri = os.environ["MONGODB_URI"] + + + class MyCallback(OIDCCallback): + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + credential = DefaultAzureCredential(managed_identity_client_id=client_id) + token = credential.get_token(f"{audience}/.default").token + return OIDCCallbackResult(access_token=token) + + + props = {"OIDC_CALLBACK": MyCallback()} + c = MongoClient(uri, authMechanism="MONGODB-OIDC", authMechanismProperties=props) + c.test.test.insert_one({}) + c.close() + +GCP GKE +^^^^^^^ + +For a Google Kubernetes Engine cluster with a `configured service account`_, the token can be read from the standard +service account token file location. + +.. 
code-block:: python + + import os + from pymongo.auth_oidc import OIDCCallback, OIDCCallbackContext, OIDCCallbackResult + + + class MyCallback(OIDCCallback): + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + with open("/var/run/secrets/kubernetes.io/serviceaccount/token") as fid: + token = fid.read() + return OIDCCallbackResult(access_token=token) + + + uri = os.environ["MONGODB_URI"] + props = {"OIDC_CALLBACK": MyCallback()} + c = MongoClient(uri, authMechanism="MONGODB-OIDC", authMechanismProperties=props) + c.test.test.insert_one({}) + c.close() + +.. _MONGODB-OIDC authentication mechanism: https://www.mongodb.com/docs/manual/core/security-oidc/ +.. _Azure Internal Metadata Service: https://learn.microsoft.com/en-us/azure/virtual-machines/instance-metadata-service +.. _configured on your MongoDB deployment: https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.oidcIdentityProviders +.. _GCP Internal Metadata Service: https://cloud.google.com/compute/docs/metadata/querying-metadata +.. _azure-identity package: https://pypi.org/project/azure-identity/ +.. _configured service account: https://cloud.google.com/kubernetes-engine/docs/how-to/service-accounts diff --git a/doc/examples/copydb.rst b/doc/examples/copydb.rst index 76d0c97a36..b37677b5c2 100644 --- a/doc/examples/copydb.rst +++ b/doc/examples/copydb.rst @@ -70,4 +70,4 @@ but it has been removed. http://mongodb.com/docs/manual/reference/method/db.copyDatabase/ .. 
_Copy a Database: - https://www.mongodb.com/docs/database-tools/mongodump/#std-label-mongodump-example-copy-clone-database + https://www.mongodb.com/docs/database-tools/mongodump/mongodump-examples/#copy-and-clone-databases diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index fb61189499..338b177be3 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -125,8 +125,8 @@ examples show how to setup automatic client-side field level encryption using :class:`~pymongo.encryption.ClientEncryption` to create a new encryption data key. -.. note:: Automatic client-side field level encryption requires MongoDB 4.2 - enterprise or a MongoDB 4.2 Atlas cluster. The community version of the +.. note:: Automatic client-side field level encryption requires MongoDB >=4.2 + enterprise or a MongoDB >=4.2 Atlas cluster. The community version of the server supports automatic decryption as well as :ref:`explicit-client-side-encryption`. @@ -255,7 +255,7 @@ will result in an error. Server-Side Field Level Encryption Enforcement `````````````````````````````````````````````` -The MongoDB 4.2 server supports using schema validation to enforce encryption +MongoDB >=4.2 servers support using schema validation to enforce encryption of specific fields in a collection. This schema validation will prevent an application from inserting unencrypted values for any fields marked with the ``"encrypt"`` JSON schema keyword. @@ -457,8 +457,8 @@ Explicit encryption is a MongoDB community feature and does not use the Explicit Encryption with Automatic Decryption ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Although automatic encryption requires MongoDB 4.2 enterprise or a -MongoDB 4.2 Atlas cluster, automatic *decryption* is supported for all users. 
To configure automatic *decryption* without automatic *encryption* set ``bypass_auto_encryption=True`` in :class:`~pymongo.encryption_options.AutoEncryptionOpts`: @@ -603,7 +603,7 @@ An application using GCP credentials would look like: } client_encryption.create_data_key("gcp", master_key) -The driver will query the `VM instance metadata `_ to obtain credentials. +The driver will query the `VM instance metadata `_ to obtain credentials. An application using Azure credentials would look like, this time using :class:`~pymongo.encryption_options.AutoEncryptionOpts`: diff --git a/doc/examples/index.rst b/doc/examples/index.rst index 23f7a6f181..75d208f20f 100644 --- a/doc/examples/index.rst +++ b/doc/examples/index.rst @@ -27,6 +27,7 @@ MongoDB, you can start it like so: gevent gridfs high_availability + logging mod_wsgi network_compression server_selection diff --git a/doc/examples/logging.rst b/doc/examples/logging.rst new file mode 100644 index 0000000000..0cbc8eff09 --- /dev/null +++ b/doc/examples/logging.rst @@ -0,0 +1,63 @@ +Logging +======== + +Starting in 4.8, **PyMongo** supports `Python's native logging library `_, +enabling developers to customize the verbosity of log messages for their applications. + +Components +------------- +There are currently three different **PyMongo** components with logging support: ``pymongo.command``, ``pymongo.connection``, and ``pymongo.serverSelection``. +These components deal with command operations, connection management, and server selection, respectively. +Each can be configured separately or they can all be configured together. + +Configuration +------------- +Currently, the above components each support ``DEBUG`` logging. 
To enable a single component, do the following:: + + import logging + logging.getLogger('pymongo.<component>').setLevel(logging.DEBUG) + + + +For example, to enable command logging:: + + import logging + logging.getLogger('pymongo.command').setLevel(logging.DEBUG) + + +You can also enable all ``DEBUG`` logs at once:: + + import logging + logging.getLogger('pymongo').setLevel(logging.DEBUG) + + +Truncation +------------- +When ``pymongo.command`` debug logs are enabled, every command sent to the server and every response sent back will be included as part of the logs. +By default, these command and response documents are truncated after 1000 bytes. + +You can configure a higher truncation limit by setting the ``MONGODB_LOG_MAX_DOCUMENT_LENGTH`` environment variable to your desired length. + +Note that by default, only sensitive authentication command contents are redacted. +All commands containing user data will be logged, including the actual contents of your queries. +To prevent this behavior, set ``MONGODB_LOG_MAX_DOCUMENT_LENGTH`` to 0. This will omit the command and response bodies from the logs. 
+ +Example +------------- +Here's a simple example that enables ``pymongo.command`` debug logs and performs two database operations:: + + import logging + import pymongo + + # Automatically writes all logs to stdout + logging.basicConfig() + logging.getLogger('pymongo.command').setLevel(logging.DEBUG) + + client = pymongo.MongoClient() + client.db.test.insert_one({"x": 1}) + client.db.test.find_one({"x": 1}) + --------------------------------- + DEBUG:pymongo.command:{"clientId": {"$oid": "65cbe82614be1fc2beb4e4a9"}, "message": "Command started", "command": "{\"insert\": \"test\", \"ordered\": true, \"lsid\": {\"id\": {\"$binary\": {\"base64\": \"GI7ubVhPSsWd7+OwHEFx6Q==\", \"subType\": \"04\"}}}, \"$db\": \"db\", \"documents\": [{\"x\": 1, \"_id\": {\"$oid\": \"65cbe82614be1fc2beb4e4aa\"}}]}", "commandName": "insert", "databaseName": "db", "requestId": 1144108930, "operationId": 1144108930, "driverConnectionId": 1, "serverConnectionId": 3554, "serverHost": "localhost", "serverPort": 27017} + DEBUG:pymongo.command:{"clientId": {"$oid": "65cbe82614be1fc2beb4e4a9"}, "message": "Command succeeded", "durationMS": 0.515, "reply": "{\"n\": 1, \"ok\": 1.0}", "commandName": "insert", "databaseName": "db", "requestId": 1144108930, "operationId": 1144108930, "driverConnectionId": 1, "serverConnectionId": 3554, "serverHost": "localhost", "serverPort": 27017} + DEBUG:pymongo.command:{"clientId": {"$oid": "65cbe82614be1fc2beb4e4a9"}, "message": "Command started", "command": "{\"find\": \"test\", \"filter\": {\"x\": 1}, \"limit\": 1, \"singleBatch\": true, \"lsid\": {\"id\": {\"$binary\": {\"base64\": \"GI7ubVhPSsWd7+OwHEFx6Q==\", \"subType\": \"04\"}}}, \"$db\": \"db\"}", "commandName": "find", "databaseName": "db", "requestId": 470211272, "operationId": 470211272, "driverConnectionId": 1, "serverConnectionId": 3554, "serverHost": "localhost", "serverPort": 27017} + DEBUG:pymongo.command:{"clientId": {"$oid": "65cbe82614be1fc2beb4e4a9"}, "message": "Command succeeded", 
"durationMS": 0.621, "reply": "{\"cursor\": {\"firstBatch\": [{\"_id\": {\"$oid\": \"65cbdf391a957ed280001417\"}, \"x\": 1}], \"ns\": \"db.test\"}, \"ok\": 1.0}", "commandName": "find", "databaseName": "db", "requestId": 470211272, "operationId": 470211272, "driverConnectionId": 1, "serverConnectionId": 3554, "serverHost": "localhost", "serverPort": 27017} diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst index 557ee7d9b9..9241ac23e7 100644 --- a/doc/examples/tls.rst +++ b/doc/examples/tls.rst @@ -32,7 +32,7 @@ MongoDB. You can read more about TLS versions and their security implications here: - ``_ + ``_ .. _python.org: https://www.python.org/downloads/ .. _homebrew: https://brew.sh/ diff --git a/doc/faq.rst b/doc/faq.rst index 2d211c756c..f0463badaa 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -166,7 +166,7 @@ they are returned to the pool. Does PyMongo support Python 3? ------------------------------ -PyMongo supports CPython 3.7+ and PyPy3.8+. See the :doc:`python3` for details. +PyMongo supports CPython 3.8+ and PyPy3.9+. See the :doc:`python3` for details. Does PyMongo support asynchronous frameworks like Gevent, asyncio, Tornado, or Twisted? --------------------------------------------------------------------------------------- diff --git a/doc/index.rst b/doc/index.rst index 2f0ba1d36a..f2797eb736 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -31,6 +31,9 @@ everything you need to know to use **PyMongo**. :doc:`examples/type_hints` Using PyMongo with type hints. +:doc:`examples/logging` + Using PyMongo's logging capabilities. + :doc:`faq` Some questions that come up often. diff --git a/doc/installation.rst b/doc/installation.rst index edbdc0ac63..ee83b30c6f 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -28,7 +28,7 @@ To upgrade using pip:: Dependencies ------------ -PyMongo supports CPython 3.7+ and PyPy3.7+. +PyMongo supports CPython 3.8+ and PyPy3.9+. Required dependencies ..................... 
@@ -140,7 +140,7 @@ See `http://bugs.python.org/issue11623 `_ for a more detailed explanation. **Lion (10.7) and newer** - PyMongo's C extensions can be built against -versions of Python 3.7+ downloaded from python.org. In all cases Xcode must be +versions of Python 3.8+ downloaded from python.org. In all cases Xcode must be installed with 'UNIX Development Support'. **Xcode 5.1**: Starting with version 5.1 the version of clang that ships with diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 35fc922d51..bc6da85560 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -328,13 +328,13 @@ Removed :meth:`pymongo.database.Database.collection_names`. Use :meth:`~pymongo.database.Database.list_collection_names` instead. Code like this:: - names = client.collection_names() - non_system_names = client.collection_names(include_system_collections=False) + names = client.db.collection_names() + non_system_names = client.db.collection_names(include_system_collections=False) can be changed to this:: - names = client.list_collection_names() - non_system_names = client.list_collection_names(filter={"name": {"$regex": r"^(?!system\\.)"}}) + names = client.db.list_collection_names() + non_system_names = client.db.list_collection_names(filter={"name": {"$regex": "^(?!system\\.)"}}) Database.current_op is removed .............................. @@ -637,12 +637,10 @@ Collection.group is removed ........................... Removed :meth:`pymongo.collection.Collection.group`. This method was -deprecated in PyMongo 3.5. MongoDB 4.2 removed the `group command`_. +deprecated in PyMongo 3.5. MongoDB 4.2 removed the group command. Use :meth:`~pymongo.collection.Collection.aggregate` with the ``$group`` stage instead. -.. _group command: https://mongodb.com/docs/manual/reference/command/group/ - Collection.map_reduce and Collection.inline_map_reduce are removed .................................................................. 
@@ -940,9 +938,7 @@ Collection.parallel_scan is removed ................................... Removed :meth:`~pymongo.collection.Collection.parallel_scan`. MongoDB 4.2 -removed the `parallelCollectionScan command`_. There is no replacement. - -.. _parallelCollectionScan command: https://mongodb.com/docs/manual/reference/command/parallelCollectionScan/ +removed the parallelCollectionScan command. There is no replacement. pymongo.message helpers are removed ................................... diff --git a/doc/python3.rst b/doc/python3.rst index cc11409bcf..148c5ee454 100644 --- a/doc/python3.rst +++ b/doc/python3.rst @@ -4,7 +4,7 @@ Python 3 FAQ What Python 3 versions are supported? ------------------------------------- -PyMongo supports CPython 3.7+ and PyPy3.8+. +PyMongo supports CPython 3.8+ and PyPy3.9+. Are there any PyMongo behavior changes with Python 3? ----------------------------------------------------- diff --git a/doc/tools.rst b/doc/tools.rst index 7aea84cb0c..6dd0df8a4d 100644 --- a/doc/tools.rst +++ b/doc/tools.rst @@ -44,7 +44,7 @@ MincePy workflow as possible. Ming - `Ming `_ (the Merciless) is a + `Ming `_ is a library that allows you to enforce schemas on a MongoDB database in your Python application. It was developed by `SourceForge `_ in the course of their migration to diff --git a/green_framework_test.py b/green_framework_test.py index 01f72b245a..65025798cf 100644 --- a/green_framework_test.py +++ b/green_framework_test.py @@ -98,7 +98,8 @@ def main(): sys.exit(1) run( - args[0], *args[1:] # Framework name. + args[0], + *args[1:], # Framework name. ) # Command line args to pytest, like what test to run. diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 63aa40623a..8d01fefce8 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -63,9 +63,8 @@ def __init__(self, database: Database, collection: str = "fs"): Raises :class:`TypeError` if `database` is not an instance of :class:`~pymongo.database.Database`. 
- :Parameters: - - `database`: database to use - - `collection` (optional): root collection to use + :param database: database to use + :param collection: root collection to use .. versionchanged:: 4.0 Removed the `disable_md5` parameter. See @@ -110,8 +109,7 @@ def new_file(self, **kwargs: Any) -> GridIn: not already exist in GridFS. Otherwise :class:`~gridfs.errors.FileExists` is raised. - :Parameters: - - `**kwargs` (optional): keyword arguments for file creation + :param kwargs: keyword arguments for file creation """ return GridIn(self.__collection, **kwargs) @@ -135,9 +133,8 @@ def put(self, data: Any, **kwargs: Any) -> Any: not already exist in GridFS. Otherwise :class:`~gridfs.errors.FileExists` is raised. - :Parameters: - - `data`: data to be written as a file. - - `**kwargs` (optional): keyword arguments for file creation + :param data: data to be written as a file. + :param kwargs: keyword arguments for file creation .. versionchanged:: 3.0 w=0 writes to GridFS are now prohibited. @@ -152,9 +149,8 @@ def get(self, file_id: Any, session: Optional[ClientSession] = None) -> GridOut: Returns an instance of :class:`~gridfs.grid_file.GridOut`, which provides a file-like interface for reading. - :Parameters: - - `file_id`: ``"_id"`` of the file to get - - `session` (optional): a + :param file_id: ``"_id"`` of the file to get + :param session: a :class:`~pymongo.client_session.ClientSession` .. versionchanged:: 3.6 @@ -193,13 +189,12 @@ def get_version( Raises :class:`~gridfs.errors.NoFile` if no such version of that file exists. 
- :Parameters: - - `filename`: ``"filename"`` of the file to get, or `None` - - `version` (optional): version of the file to get (defaults + :param filename: ``"filename"`` of the file to get, or `None` + :param version: version of the file to get (defaults to -1, the most recent version uploaded) - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession` - - `**kwargs` (optional): find files by custom metadata. + :param kwargs: find files by custom metadata. .. versionchanged:: 3.6 Added ``session`` parameter. @@ -235,11 +230,10 @@ def get_last_version( Equivalent to calling :meth:`get_version` with the default `version` (``-1``). - :Parameters: - - `filename`: ``"filename"`` of the file to get, or `None` - - `session` (optional): a + :param filename: ``"filename"`` of the file to get, or `None` + :param session: a :class:`~pymongo.client_session.ClientSession` - - `**kwargs` (optional): find files by custom metadata. + :param kwargs: find files by custom metadata. .. versionchanged:: 3.6 Added ``session`` parameter. @@ -261,9 +255,8 @@ def delete(self, file_id: Any, session: Optional[ClientSession] = None) -> None: .. note:: Deletes of non-existent files are considered successful since the end result is the same: no file with that _id remains. - :Parameters: - - `file_id`: ``"_id"`` of the file to delete - - `session` (optional): a + :param file_id: ``"_id"`` of the file to delete + :param session: a :class:`~pymongo.client_session.ClientSession` .. versionchanged:: 3.6 @@ -280,8 +273,7 @@ def list(self, session: Optional[ClientSession] = None) -> list[str]: """List the names of all files stored in this instance of :class:`GridFS`. - :Parameters: - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession` .. versionchanged:: 3.6 @@ -309,19 +301,20 @@ def find_one( All arguments to :meth:`find` are also valid arguments for :meth:`find_one`, although any `limit` argument will be ignored. 
Returns a single :class:`~gridfs.grid_file.GridOut`, - or ``None`` if no matching file is found. For example:: + or ``None`` if no matching file is found. For example: + + .. code-block: python file = fs.find_one({"filename": "lisa.txt"}) - :Parameters: - - `filter` (optional): a dictionary specifying + :param filter: a dictionary specifying the query to be performing OR any other type to be used as the value for a query for ``"_id"`` in the file collection. - - `*args` (optional): any additional positional arguments are + :param args: any additional positional arguments are the same as the arguments to :meth:`find`. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession` - - `**kwargs` (optional): any additional keyword arguments + :param kwargs: any additional keyword arguments are the same as the arguments to :meth:`find`. .. versionchanged:: 3.6 @@ -367,20 +360,19 @@ def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: :meth:`find`, all returned :class:`~gridfs.grid_file.GridOut` instances are associated with that session. - :Parameters: - - `filter` (optional): A query document that selects which files + :param filter: A query document that selects which files to include in the result set. Can be an empty document to include all files. - - `skip` (optional): the number of files to omit (from + :param skip: the number of files to omit (from the start of the result set) when returning the results - - `limit` (optional): the maximum number of results to + :param limit: the maximum number of results to return - - `no_cursor_timeout` (optional): if False (the default), any + :param no_cursor_timeout: if False (the default), any returned cursor is closed by the server after 10 minutes of inactivity. If set to True, the returned cursor will never time out on the server. Care should be taken to ensure that cursors with no_cursor_timeout turned on are properly closed. 
- - `sort` (optional): a list of (key, direction) pairs + :param sort: a list of (key, direction) pairs specifying the sort order for this query. See :meth:`~pymongo.cursor.Cursor.sort` for details. @@ -429,12 +421,11 @@ def exists( create appropriate indexes; application developers should be sure to create indexes if needed and as appropriate. - :Parameters: - - `document_or_id` (optional): query document, or _id of the + :param document_or_id: query document, or _id of the document to check for - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession` - - `**kwargs` (optional): keyword arguments are used as a + :param kwargs: keyword arguments are used as a query document, if they're present. .. versionchanged:: 3.6 @@ -468,15 +459,14 @@ def __init__( Raises :exc:`~pymongo.errors.ConfigurationError` if `write_concern` is not acknowledged. - :Parameters: - - `database`: database to use. - - `bucket_name` (optional): The name of the bucket. Defaults to 'fs'. - - `chunk_size_bytes` (optional): The chunk size in bytes. Defaults + :param database: database to use. + :param bucket_name: The name of the bucket. Defaults to 'fs'. + :param chunk_size_bytes: The chunk size in bytes. Defaults to 255KB. - - `write_concern` (optional): The + :param write_concern: The :class:`~pymongo.write_concern.WriteConcern` to use. If ``None`` (the default) db.write_concern is used. - - `read_preference` (optional): The read preference to use. If + :param read_preference: The read preference to use. If ``None`` (the default) db.read_preference is used. .. versionchanged:: 4.0 @@ -545,14 +535,13 @@ def open_upload_stream( that file exists. Raises :exc:`~ValueError` if `filename` is not a string. - :Parameters: - - `filename`: The name of the file to upload. - - `chunk_size_bytes` (options): The number of bytes per chunk of this + :param filename: The name of the file to upload. 
+ :param chunk_size_bytes: The number of bytes per chunk of this file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`. - - `metadata` (optional): User data for the 'metadata' field of the + :param metadata: User data for the 'metadata' field of the files collection document. If not provided the metadata field will be omitted from the files collection document. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession` .. versionchanged:: 3.6 @@ -603,16 +592,15 @@ def open_upload_stream_with_id( that file exists. Raises :exc:`~ValueError` if `filename` is not a string. - :Parameters: - - `file_id`: The id to use for this file. The id must not have + :param file_id: The id to use for this file. The id must not have already been used for another file. - - `filename`: The name of the file to upload. - - `chunk_size_bytes` (options): The number of bytes per chunk of this + :param filename: The name of the file to upload. + :param chunk_size_bytes: The number of bytes per chunk of this file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`. - - `metadata` (optional): User data for the 'metadata' field of the + :param metadata: User data for the 'metadata' field of the files collection document. If not provided the metadata field will be omitted from the files collection document. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession` .. versionchanged:: 3.6 @@ -661,16 +649,15 @@ def upload_from_stream( that file exists. Raises :exc:`~ValueError` if `filename` is not a string. - :Parameters: - - `filename`: The name of the file to upload. - - `source`: The source stream of the content to be uploaded. Must be + :param filename: The name of the file to upload. + :param source: The source stream of the content to be uploaded. Must be a file-like object that implements :meth:`read` or a string. 
- - `chunk_size_bytes` (options): The number of bytes per chunk of this + :param chunk_size_bytes: The number of bytes per chunk of this file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`. - - `metadata` (optional): User data for the 'metadata' field of the + :param metadata: User data for the 'metadata' field of the files collection document. If not provided the metadata field will be omitted from the files collection document. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession` .. versionchanged:: 3.6 @@ -710,18 +697,17 @@ def upload_from_stream_with_id( that file exists. Raises :exc:`~ValueError` if `filename` is not a string. - :Parameters: - - `file_id`: The id to use for this file. The id must not have + :param file_id: The id to use for this file. The id must not have already been used for another file. - - `filename`: The name of the file to upload. - - `source`: The source stream of the content to be uploaded. Must be + :param filename: The name of the file to upload. + :param source: The source stream of the content to be uploaded. Must be a file-like object that implements :meth:`read` or a string. - - `chunk_size_bytes` (options): The number of bytes per chunk of this + :param chunk_size_bytes: The number of bytes per chunk of this file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`. - - `metadata` (optional): User data for the 'metadata' field of the + :param metadata: User data for the 'metadata' field of the files collection document. If not provided the metadata field will be omitted from the files collection document. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession` .. versionchanged:: 3.6 @@ -751,9 +737,8 @@ def open_download_stream( Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists. - :Parameters: - - `file_id`: The _id of the file to be downloaded. 
- - `session` (optional): a + :param file_id: The _id of the file to be downloaded. + :param session: a :class:`~pymongo.client_session.ClientSession` .. versionchanged:: 3.6 @@ -786,10 +771,9 @@ def download_to_stream( Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists. - :Parameters: - - `file_id`: The _id of the file to be downloaded. - - `destination`: a file-like object implementing :meth:`write`. - - `session` (optional): a + :param file_id: The _id of the file to be downloaded. + :param destination: a file-like object implementing :meth:`write`. + :param session: a :class:`~pymongo.client_session.ClientSession` .. versionchanged:: 3.6 @@ -817,9 +801,8 @@ def delete(self, file_id: Any, session: Optional[ClientSession] = None) -> None: Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists. - :Parameters: - - `file_id`: The _id of the file to be deleted. - - `session` (optional): a + :param file_id: The _id of the file to be deleted. + :param session: a :class:`~pymongo.client_session.ClientSession` .. versionchanged:: 3.6 @@ -864,17 +847,16 @@ def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: :meth:`find`, all returned :class:`~gridfs.grid_file.GridOut` instances are associated with that session. - :Parameters: - - `filter`: Search query. - - `batch_size` (optional): The number of documents to return per + :param filter: Search query. + :param batch_size: The number of documents to return per batch. - - `limit` (optional): The maximum number of documents to return. - - `no_cursor_timeout` (optional): The server normally times out idle + :param limit: The maximum number of documents to return. + :param no_cursor_timeout: The server normally times out idle cursors after an inactivity period (10 minutes) to prevent excess memory use. Set this option to True prevent that. - - `skip` (optional): The number of documents to skip before + :param skip: The number of documents to skip before returning. 
- - `sort` (optional): The order by which to sort results. Defaults to + :param sort: The order by which to sort results. Defaults to None. """ return GridOutCursor(self._collection, *args, **kwargs) @@ -899,12 +881,11 @@ def open_download_stream_by_name( Raises :exc:`~ValueError` filename is not a string. - :Parameters: - - `filename`: The name of the file to read from. - - `revision` (optional): Which revision (documents with the same + :param filename: The name of the file to read from. + :param revision: Which revision (documents with the same filename and different uploadDate) of the file to retrieve. Defaults to -1 (the most recent revision). - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession` :Note: Revision numbers are defined as follows: @@ -958,13 +939,12 @@ def download_to_stream_by_name( Raises :exc:`~ValueError` if `filename` is not a string. - :Parameters: - - `filename`: The name of the file to read from. - - `destination`: A file-like object that implements :meth:`write`. - - `revision` (optional): Which revision (documents with the same + :param filename: The name of the file to read from. + :param destination: A file-like object that implements :meth:`write`. + :param revision: Which revision (documents with the same filename and different uploadDate) of the file to retrieve. Defaults to -1 (the most recent revision). - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession` :Note: Revision numbers are defined as follows: @@ -1001,10 +981,9 @@ def rename( Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists. - :Parameters: - - `file_id`: The _id of the file to be renamed. - - `new_filename`: The new name of the file. - - `session` (optional): a + :param file_id: The _id of the file to be renamed. + :param new_filename: The new name of the file. + :param session: a :class:`~pymongo.client_session.ClientSession` .. 
versionchanged:: 3.6 diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index 685d097494..ac72c144b7 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -19,24 +19,26 @@ import io import math import os +import warnings from typing import Any, Iterable, Mapping, NoReturn, Optional -from bson.binary import Binary from bson.int64 import Int64 from bson.objectid import ObjectId -from bson.son import SON from gridfs.errors import CorruptGridFile, FileExists, NoFile from pymongo import ASCENDING from pymongo.client_session import ClientSession from pymongo.collection import Collection +from pymongo.common import MAX_MESSAGE_SIZE from pymongo.cursor import Cursor from pymongo.errors import ( + BulkWriteError, ConfigurationError, CursorNotFound, DuplicateKeyError, InvalidOperation, OperationFailure, ) +from pymongo.helpers import _check_write_command_response from pymongo.read_preferences import ReadPreference _SEEK_SET = os.SEEK_SET @@ -49,9 +51,16 @@ """Default chunk size, in bytes.""" # Slightly under a power of 2, to work well with server's record allocations. DEFAULT_CHUNK_SIZE = 255 * 1024 +# The number of chunked bytes to buffer before calling insert_many. +_UPLOAD_BUFFER_SIZE = MAX_MESSAGE_SIZE +# The number of chunk documents to buffer before calling insert_many. +_UPLOAD_BUFFER_CHUNKS = 100000 +# Rough BSON overhead of a chunk document not including the chunk data itself. 
+# Essentially len(encode({"_id": ObjectId(), "files_id": ObjectId(), "n": 1, "data": ""})) +_CHUNK_OVERHEAD = 60 -_C_INDEX: SON[str, Any] = SON([("files_id", ASCENDING), ("n", ASCENDING)]) -_F_INDEX: SON[str, Any] = SON([("filename", ASCENDING), ("uploadDate", ASCENDING)]) +_C_INDEX: dict[str, Any] = {"files_id": ASCENDING, "n": ASCENDING} +_F_INDEX: dict[str, Any] = {"filename": ASCENDING, "uploadDate": ASCENDING} def _grid_in_property( @@ -61,8 +70,15 @@ def _grid_in_property( closed_only: Optional[bool] = False, ) -> Any: """Create a GridIn property.""" + warn_str = "" + if docstring.startswith("DEPRECATED,"): + warn_str = ( + f"GridIn property '{field_name}' is deprecated and will be removed in PyMongo 5.0" + ) def getter(self: Any) -> Any: + if warn_str: + warnings.warn(warn_str, stacklevel=2, category=DeprecationWarning) if closed_only and not self._closed: raise AttributeError("can only get %r on a closed file" % field_name) # Protect against PHP-237 @@ -71,6 +87,8 @@ def getter(self: Any) -> Any: return self._file.get(field_name, None) def setter(self: Any, value: Any) -> Any: + if warn_str: + warnings.warn(warn_str, stacklevel=2, category=DeprecationWarning) if self._closed: self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {field_name: value}}) self._file[field_name] = value @@ -92,8 +110,15 @@ def setter(self: Any, value: Any) -> Any: def _grid_out_property(field_name: str, docstring: str) -> Any: """Create a GridOut property.""" + warn_str = "" + if docstring.startswith("DEPRECATED,"): + warn_str = ( + f"GridOut property '{field_name}' is deprecated and will be removed in PyMongo 5.0" + ) def getter(self: Any) -> Any: + if warn_str: + warnings.warn(warn_str, stacklevel=2, category=DeprecationWarning) self._ensure_file() # Protect against PHP-237 @@ -152,12 +177,11 @@ def __init__( - ``"encoding"``: encoding used for this file. Any :class:`str` that is written to the file will be converted to :class:`bytes`. 
- :Parameters: - - `root_collection`: root collection to write to - - `session` (optional): a + :param root_collection: root collection to write to + :param session: a :class:`~pymongo.client_session.ClientSession` to use for all commands - - `**kwargs: Any` (optional): file level options (see above) + :param kwargs: Any: file level options (see above) .. versionchanged:: 4.0 Removed the `disable_md5` parameter. See @@ -200,6 +224,8 @@ def __init__( object.__setattr__(self, "_chunk_number", 0) object.__setattr__(self, "_closed", False) object.__setattr__(self, "_ensured_index", False) + object.__setattr__(self, "_buffered_docs", []) + object.__setattr__(self, "_buffered_docs_size", 0) def __create_index(self, collection: Collection, index_key: Any, unique: bool) -> None: doc = collection.find_one(projection={"_id": 1}, session=self._session) @@ -251,6 +277,8 @@ def closed(self) -> bool: _buffer: io.BytesIO _closed: bool + _buffered_docs: list[dict[str, Any]] + _buffered_docs_size: int def __getattr__(self, name: str) -> Any: if name in self._file: @@ -270,32 +298,52 @@ def __setattr__(self, name: str, value: Any) -> None: if self._closed: self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) - def __flush_data(self, data: Any) -> None: + def __flush_data(self, data: Any, force: bool = False) -> None: """Flush `data` to a chunk.""" self.__ensure_indexes() - if not data: - return assert len(data) <= self.chunk_size - - chunk = {"files_id": self._file["_id"], "n": self._chunk_number, "data": Binary(data)} - - try: - self._chunks.insert_one(chunk, session=self._session) - except DuplicateKeyError: - self._raise_file_exists(self._file["_id"]) + if data: + self._buffered_docs.append( + {"files_id": self._file["_id"], "n": self._chunk_number, "data": data} + ) + self._buffered_docs_size += len(data) + _CHUNK_OVERHEAD + if not self._buffered_docs: + return + # Limit to 100,000 chunks or 32MB (+1 chunk) of data. 
+ if ( + force + or self._buffered_docs_size >= _UPLOAD_BUFFER_SIZE + or len(self._buffered_docs) >= _UPLOAD_BUFFER_CHUNKS + ): + try: + self._chunks.insert_many(self._buffered_docs, session=self._session) + except BulkWriteError as exc: + # For backwards compatibility, raise an insert_one style exception. + write_errors = exc.details["writeErrors"] + for err in write_errors: + if err.get("code") in (11000, 11001, 12582): # Duplicate key errors + self._raise_file_exists(self._file["_id"]) + result = {"writeErrors": write_errors} + wces = exc.details["writeConcernErrors"] + if wces: + result["writeConcernError"] = wces[-1] + _check_write_command_response(result) + raise + self._buffered_docs = [] + self._buffered_docs_size = 0 self._chunk_number += 1 self._position += len(data) - def __flush_buffer(self) -> None: + def __flush_buffer(self, force: bool = False) -> None: """Flush the buffer contents out to a chunk.""" - self.__flush_data(self._buffer.getvalue()) + self.__flush_data(self._buffer.getvalue(), force=force) self._buffer.close() self._buffer = io.BytesIO() def __flush(self) -> Any: """Flush the file to the database.""" try: - self.__flush_buffer() + self.__flush_buffer(force=True) # The GridFS spec says length SHOULD be an Int64. self._file["length"] = Int64(self._position) self._file["uploadDate"] = datetime.datetime.now(tz=datetime.timezone.utc) @@ -344,8 +392,7 @@ def write(self, data: Any) -> None: Unicode data is only allowed if the file has an :attr:`encoding` attribute. - :Parameters: - - `data`: string of bytes or file-like object to be written + :param data: string of bytes or file-like object to be written to the file """ if self._closed: @@ -438,12 +485,11 @@ def __init__( :class:`TypeError` if `root_collection` is not an instance of :class:`~pymongo.collection.Collection`. 
- :Parameters: - - `root_collection`: root collection to read from - - `file_id` (optional): value of ``"_id"`` for the file to read - - `file_document` (optional): file document from + :param root_collection: root collection to read from + :param file_id: value of ``"_id"`` for the file to read + :param file_document: file document from `root_collection.files` - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession` to use for all commands @@ -608,8 +654,7 @@ def read(self, size: int = -1) -> bytes: The bytes are returned as an instance of :class:`bytes` If `size` is negative or omitted all data is read. - :Parameters: - - `size` (optional): the number of bytes to read + :param size: the number of bytes to read .. versionchanged:: 3.8 This method now only checks for extra chunks after reading the @@ -621,8 +666,7 @@ def read(self, size: int = -1) -> bytes: def readline(self, size: int = -1) -> bytes: # type: ignore[override] """Read one line or up to `size` bytes from the file. - :Parameters: - - `size` (optional): the maximum number of bytes to read + :param size: the maximum number of bytes to read """ return self._read_size_or_line(size=size, line=True) @@ -633,10 +677,9 @@ def tell(self) -> int: def seek(self, pos: int, whence: int = _SEEK_SET) -> int: """Set the current position of this file. - :Parameters: - - `pos`: the position (or offset if using relative + :param pos: the position (or offset if using relative positioning) to seek to - - `whence` (optional): where to seek + :param whence: where to seek from. 
:attr:`os.SEEK_SET` (``0``) for absolute file positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative to the current position, :attr:`os.SEEK_END` (``2``) to diff --git a/hatch_build.py b/hatch_build.py new file mode 100644 index 0000000000..91315eb09f --- /dev/null +++ b/hatch_build.py @@ -0,0 +1,36 @@ +"""A custom hatch build hook for pymongo.""" +from __future__ import annotations + +import os +import subprocess +import sys +from pathlib import Path + +from hatchling.builders.hooks.plugin.interface import BuildHookInterface + + +class CustomHook(BuildHookInterface): + """The pymongo build hook.""" + + def initialize(self, version, build_data): + """Initialize the hook.""" + if self.target_name == "sdist": + return + here = Path(__file__).parent.resolve() + sys.path.insert(0, str(here)) + + subprocess.check_call([sys.executable, "_setup.py", "build_ext", "-i"]) + + # Ensure wheel is marked as binary and contains the binary files. + build_data["infer_tag"] = True + build_data["pure_python"] = False + if os.name == "nt": + patt = ".pyd" + else: + patt = ".so" + for pkg in ["bson", "pymongo"]: + dpath = here / pkg + for fpath in dpath.glob(f"*{patt}"): + relpath = os.path.relpath(fpath, here) + build_data["artifacts"].append(relpath) + build_data["force_include"][relpath] = relpath diff --git a/mypy.ini b/mypy.ini deleted file mode 100644 index 5fd52aa7c1..0000000000 --- a/mypy.ini +++ /dev/null @@ -1,40 +0,0 @@ -[mypy] -python_version = 3.7 -check_untyped_defs = true -disallow_subclassing_any = true -disallow_incomplete_defs = true -no_implicit_optional = true -pretty = true -show_error_context = true -show_error_codes = true -strict_equality = true -warn_unused_configs = true -warn_unused_ignores = true -warn_redundant_casts = true - -[mypy-gevent.*] -ignore_missing_imports = True - -[mypy-kerberos.*] -ignore_missing_imports = True - -[mypy-mockupdb] -ignore_missing_imports = True - -[mypy-pymongo_auth_aws.*] -ignore_missing_imports = True - 
-[mypy-pymongocrypt.*] -ignore_missing_imports = True - -[mypy-service_identity.*] -ignore_missing_imports = True - -[mypy-snappy.*] -ignore_missing_imports = True - -[mypy-test.test_typing] -warn_unused_ignores = True - -[mypy-winkerberos.*] -ignore_missing_imports = True diff --git a/mypy_test.ini b/mypy_test.ini new file mode 100644 index 0000000000..c3566c3bfc --- /dev/null +++ b/mypy_test.ini @@ -0,0 +1,8 @@ +[mypy] +strict = true +show_error_codes = true +disable_error_code = attr-defined, union-attr, var-annotated, assignment, no-redef, type-arg, import, no-untyped-call, no-untyped-def, index, no-any-return, misc +exclude = (?x)( + ^test/mypy_fails/*.*$ + | ^test/conftest.py$ + ) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index cdcbe5a5a0..758bb33ac8 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -159,11 +159,9 @@ def timeout(seconds: Optional[float]) -> ContextManager[None]: coll.find_one() # Still uses the original 5 second deadline. coll.find_one() # Uses the original 5 second deadline. - :Parameters: - - `seconds`: A non-negative floating point number expressing seconds, or None. + :param seconds: A non-negative floating point number expressing seconds, or None. - :Raises: - - :py:class:`ValueError`: When `seconds` is negative. + :raises: :py:class:`ValueError`: When `seconds` is negative. See :ref:`timeout-example` for more examples. diff --git a/pymongo/_azure_helpers.py b/pymongo/_azure_helpers.py new file mode 100644 index 0000000000..704c561cd5 --- /dev/null +++ b/pymongo/_azure_helpers.py @@ -0,0 +1,57 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Azure helpers.""" +from __future__ import annotations + +import json +from typing import Any, Optional + + +def _get_azure_response( + resource: str, client_id: Optional[str] = None, timeout: float = 5 +) -> dict[str, Any]: + # Deferred import to save overall import time. + from urllib.request import Request, urlopen + + url = "http://169.254.169.254/metadata/identity/oauth2/token" + url += "?api-version=2018-02-01" + url += f"&resource={resource}" + if client_id: + url += f"&client_id={client_id}" + headers = {"Metadata": "true", "Accept": "application/json"} + request = Request(url, headers=headers) # noqa: S310 + try: + with urlopen(request, timeout=timeout) as response: # noqa: S310 + status = response.status + body = response.read().decode("utf8") + except Exception as e: + msg = "Failed to acquire IMDS access token: %s" % e + raise ValueError(msg) from None + + if status != 200: + msg = "Failed to acquire IMDS access token." + raise ValueError(msg) + try: + data = json.loads(body) + except Exception: + raise ValueError("Azure IMDS response must be in JSON format.") from None + + for key in ["access_token", "expires_in"]: + if not data.get(key): + msg = "Azure IMDS response must contain %s, but was %s." 
+ msg = msg % (key, body) + raise ValueError(msg) + + return data diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index 7ac66a1e4b..f95b949380 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -68,8 +68,6 @@ static int buffer_write_bytes_ssize_t(buffer_t buffer, const char* data, Py_ssiz static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { /* NOTE just using a random number as the request_id */ - struct module_state *state = GETSTATE(self); - int request_id = rand(); unsigned int flags; char* collection_name = NULL; @@ -84,6 +82,10 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { buffer_t buffer = NULL; int length_location, message_length; PyObject* result = NULL; + struct module_state *state = GETSTATE(self); + if (!state) { + return NULL; + } if (!(PyArg_ParseTuple(args, "Iet#iiOOO", &flags, @@ -216,8 +218,6 @@ static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { * only checked *after* generating the entire message. 
*/ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { - struct module_state *state = GETSTATE(self); - /* NOTE just using a random number as the request_id */ int request_id = rand(); unsigned int flags; @@ -234,6 +234,10 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { int max_doc_size = 0; PyObject* result = NULL; PyObject* iterator = NULL; + struct module_state *state = GETSTATE(self); + if (!state) { + return NULL; + } /*flags, command, identifier, docs, opts*/ if (!(PyArg_ParseTuple(args, "IOet#OO", @@ -540,6 +544,9 @@ _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { codec_options_t options; buffer_t buffer; struct module_state *state = GETSTATE(self); + if (!state) { + return NULL; + } if (!(PyArg_ParseTuple(args, "bOObOO", &op, &command, &docs, &ack, @@ -594,6 +601,9 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { codec_options_t options; buffer_t buffer; struct module_state *state = GETSTATE(self); + if (!state) { + return NULL; + } if (!(PyArg_ParseTuple(args, "bOObOO", &op, &command, &docs, &ack, @@ -867,6 +877,9 @@ _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { codec_options_t options; buffer_t buffer; struct module_state *state = GETSTATE(self); + if (!state) { + return NULL; + } if (!(PyArg_ParseTuple(args, "et#bOOOO", "utf-8", &ns, &ns_len, &op, &command, &docs, @@ -925,43 +938,41 @@ static PyMethodDef _CMessageMethods[] = { {NULL, NULL, 0, NULL} }; -#define INITERROR return NULL +#define INITERROR return -1; static int _cmessage_traverse(PyObject *m, visitproc visit, void *arg) { - Py_VISIT(GETSTATE(m)->_cbson); - Py_VISIT(GETSTATE(m)->_max_bson_size_str); - Py_VISIT(GETSTATE(m)->_max_message_size_str); - Py_VISIT(GETSTATE(m)->_max_split_size_str); - Py_VISIT(GETSTATE(m)->_max_write_batch_size_str); + struct module_state *state = GETSTATE(m); + if (!state) { + return 0; + } + Py_VISIT(state->_cbson); + Py_VISIT(state->_max_bson_size_str); + 
Py_VISIT(state->_max_message_size_str); + Py_VISIT(state->_max_split_size_str); + Py_VISIT(state->_max_write_batch_size_str); return 0; } static int _cmessage_clear(PyObject *m) { - Py_CLEAR(GETSTATE(m)->_cbson); - Py_CLEAR(GETSTATE(m)->_max_bson_size_str); - Py_CLEAR(GETSTATE(m)->_max_message_size_str); - Py_CLEAR(GETSTATE(m)->_max_split_size_str); - Py_CLEAR(GETSTATE(m)->_max_write_batch_size_str); + struct module_state *state = GETSTATE(m); + if (!state) { + return 0; + } + Py_CLEAR(state->_cbson); + Py_CLEAR(state->_max_bson_size_str); + Py_CLEAR(state->_max_message_size_str); + Py_CLEAR(state->_max_split_size_str); + Py_CLEAR(state->_max_write_batch_size_str); return 0; } -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_cmessage", - NULL, - sizeof(struct module_state), - _CMessageMethods, - NULL, - _cmessage_traverse, - _cmessage_clear, - NULL -}; - -PyMODINIT_FUNC -PyInit__cmessage(void) +/* Multi-phase extension module initialization code. + * See https://peps.python.org/pep-0489/. +*/ +static int +_cmessage_exec(PyObject *m) { PyObject *_cbson = NULL; PyObject *c_api_object = NULL; - PyObject *m = NULL; struct module_state* state = NULL; /* Store a reference to the _cbson module since it's needed to call some @@ -984,13 +995,10 @@ PyInit__cmessage(void) goto fail; } - /* Returns a new reference. 
*/ - m = PyModule_Create(&moduledef); - if (m == NULL) { + state = GETSTATE(m); + if (state == NULL) { goto fail; } - - state = GETSTATE(m); state->_cbson = _cbson; if (!((state->_max_bson_size_str = PyUnicode_FromString("max_bson_size")) && (state->_max_message_size_str = PyUnicode_FromString("max_message_size")) && @@ -1000,8 +1008,7 @@ PyInit__cmessage(void) } Py_DECREF(c_api_object); - - return m; + return 0; fail: Py_XDECREF(m); @@ -1009,3 +1016,31 @@ PyInit__cmessage(void) Py_XDECREF(_cbson); INITERROR; } + + +static PyModuleDef_Slot _cmessage_slots[] = { + {Py_mod_exec, _cmessage_exec}, +#ifdef Py_MOD_MULTIPLE_INTERPRETERS_SUPPORTED + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_SUPPORTED}, +#endif + {0, NULL}, +}; + + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "_cmessage", + NULL, + sizeof(struct module_state), + _CMessageMethods, + _cmessage_slots, + _cmessage_traverse, + _cmessage_clear, + NULL +}; + +PyMODINIT_FUNC +PyInit__cmessage(void) +{ + return PyModuleDef_Init(&moduledef); +} diff --git a/pymongo/_csot.py b/pymongo/_csot.py index 6fad86f9e0..194cbad48f 100644 --- a/pymongo/_csot.py +++ b/pymongo/_csot.py @@ -21,9 +21,10 @@ from collections import deque from contextlib import AbstractContextManager from contextvars import ContextVar, Token -from typing import Any, Callable, Deque, MutableMapping, Optional, TypeVar, cast +from typing import TYPE_CHECKING, Any, Callable, Deque, MutableMapping, Optional, TypeVar, cast -from pymongo.write_concern import WriteConcern +if TYPE_CHECKING: + from pymongo.write_concern import WriteConcern TIMEOUT: ContextVar[Optional[float]] = ContextVar("TIMEOUT", default=None) RTT: ContextVar[float] = ContextVar("RTT", default=0.0) diff --git a/pymongo/_gcp_helpers.py b/pymongo/_gcp_helpers.py new file mode 100644 index 0000000000..d90f3cc217 --- /dev/null +++ b/pymongo/_gcp_helpers.py @@ -0,0 +1,40 @@ +# Copyright 2024-present MongoDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""GCP helpers.""" +from __future__ import annotations + +from typing import Any + + +def _get_gcp_response(resource: str, timeout: float = 5) -> dict[str, Any]: + from urllib.request import Request, urlopen + + url = "http://metadata/computeMetadata/v1/instance/service-accounts/default/identity" + url += f"?audience={resource}" + headers = {"Metadata-Flavor": "Google"} + request = Request(url, headers=headers) # noqa: S310 + try: + with urlopen(request, timeout=timeout) as response: # noqa: S310 + status = response.status + body = response.read().decode("utf8") + except Exception as e: + msg = "Failed to acquire IMDS access token: %s" % e + raise ValueError(msg) from None + + if status != 200: + msg = "Failed to acquire IMDS access token." + raise ValueError(msg) + + return dict(access_token=body) diff --git a/pymongo/_version.py b/pymongo/_version.py index c1b30dc88f..b89e98d5be 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,16 +15,29 @@ """Current version of PyMongo.""" from __future__ import annotations -from typing import Tuple, Union +import re +from typing import List, Tuple, Union -version_tuple: Tuple[Union[int, str], ...] 
= (4, 6, 0) +__version__ = "4.8.0" -def get_version_string() -> str: - if isinstance(version_tuple[-1], str): - return ".".join(map(str, version_tuple[:-1])) + version_tuple[-1] - return ".".join(map(str, version_tuple)) +def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: + pattern = r"(?P\d+).(?P\d+).(?P\d+)(?P.*)" + match = re.match(pattern, version) + if match: + parts: List[Union[int, str]] = [int(match[part]) for part in ["major", "minor", "patch"]] + if match["rest"]: + parts.append(match["rest"]) + elif re.match(r"\d+.\d+", version): + parts = [int(part) for part in version.split(".")] + else: + raise ValueError("Could not parse version") + return tuple(parts) -__version__: str = get_version_string() +version_tuple = get_version_tuple(__version__) version = __version__ + + +def get_version_string() -> str: + return __version__ diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index ef6af1092e..574db10aca 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -18,7 +18,6 @@ from collections.abc import Callable, Mapping, MutableMapping from typing import TYPE_CHECKING, Any, Optional, Union -from bson.son import SON from pymongo import common from pymongo.collation import validate_collation_or_none from pymongo.errors import ConfigurationError @@ -122,7 +121,6 @@ def _database(self) -> Database: def get_read_preference( self, session: Optional[ClientSession] ) -> Union[_AggWritePref, _ServerMode]: - if self._write_preference: return self._write_preference pref = self._target._read_preference_for(session) @@ -138,7 +136,7 @@ def get_cursor( read_preference: _ServerMode, ) -> CommandCursor[_DocumentType]: # Serialize command. 
- cmd = SON([("aggregate", self._aggregation_target), ("pipeline", self._pipeline)]) + cmd = {"aggregate": self._aggregation_target, "pipeline": self._pipeline} cmd.update(self._options) # Apply this target's read concern if: diff --git a/pymongo/auth.py b/pymongo/auth.py index 58fc36d051..8bc4145abc 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -36,9 +36,15 @@ from urllib.parse import quote from bson.binary import Binary -from bson.son import SON from pymongo.auth_aws import _authenticate_aws -from pymongo.auth_oidc import _authenticate_oidc, _get_authenticator, _OIDCProperties +from pymongo.auth_oidc import ( + _authenticate_oidc, + _get_authenticator, + _OIDCAzureCallback, + _OIDCGCPCallback, + _OIDCProperties, + _OIDCTestCallback, +) from pymongo.errors import ConfigurationError, OperationFailure from pymongo.saslprep import saslprep @@ -49,13 +55,13 @@ HAVE_KERBEROS = True _USE_PRINCIPAL = False try: - import winkerberos as kerberos + import winkerberos as kerberos # type:ignore[import] if tuple(map(int, kerberos.__version__.split(".")[:2])) >= (0, 5): _USE_PRINCIPAL = True except ImportError: try: - import kerberos + import kerberos # type:ignore[import] except ImportError: HAVE_KERBEROS = False @@ -132,7 +138,7 @@ def _build_credentials_tuple( raise ValueError("authentication source must be $external or None for GSSAPI") properties = extra.get("authmechanismproperties", {}) service_name = properties.get("SERVICE_NAME", "mongodb") - canonicalize = properties.get("CANONICALIZE_HOST_NAME", False) + canonicalize = bool(properties.get("CANONICALIZE_HOST_NAME", False)) service_realm = properties.get("SERVICE_REALM") props = GSSAPIProperties( service_name=service_name, @@ -163,8 +169,10 @@ def _build_credentials_tuple( return MongoCredential(mech, "$external", user, passwd, aws_props, None) elif mech == "MONGODB-OIDC": properties = extra.get("authmechanismproperties", {}) - request_token_callback = properties.get("request_token_callback") - provider_name = 
properties.get("PROVIDER_NAME", "") + callback = properties.get("OIDC_CALLBACK") + human_callback = properties.get("OIDC_HUMAN_CALLBACK") + environ = properties.get("ENVIRONMENT") + token_resource = properties.get("TOKEN_RESOURCE", "") default_allowed = [ "*.mongodb.net", "*.mongodb-dev.net", @@ -174,15 +182,51 @@ def _build_credentials_tuple( "127.0.0.1", "::1", ] - allowed_hosts = properties.get("allowed_hosts", default_allowed) - if not request_token_callback and provider_name != "aws": - raise ConfigurationError( - "authentication with MONGODB-OIDC requires providing an request_token_callback or a provider_name of 'aws'" - ) + allowed_hosts = properties.get("ALLOWED_HOSTS", default_allowed) + msg = ( + "authentication with MONGODB-OIDC requires providing either a callback or a environment" + ) + if passwd is not None: + msg = "password is not supported by MONGODB-OIDC" + raise ConfigurationError(msg) + if callback or human_callback: + if environ is not None: + raise ConfigurationError(msg) + if callback and human_callback: + msg = "cannot set both OIDC_CALLBACK and OIDC_HUMAN_CALLBACK" + raise ConfigurationError(msg) + elif environ is not None: + if environ == "test": + if user is not None: + msg = "test environment for MONGODB-OIDC does not support username" + raise ConfigurationError(msg) + callback = _OIDCTestCallback() + elif environ == "azure": + passwd = None + if not token_resource: + raise ConfigurationError( + "Azure environment for MONGODB-OIDC requires a TOKEN_RESOURCE auth mechanism property" + ) + callback = _OIDCAzureCallback(token_resource) + elif environ == "gcp": + passwd = None + if not token_resource: + raise ConfigurationError( + "GCP provider for MONGODB-OIDC requires a TOKEN_RESOURCE auth mechanism property" + ) + callback = _OIDCGCPCallback(token_resource) + else: + raise ConfigurationError(f"unrecognized ENVIRONMENT for MONGODB-OIDC: {environ}") + else: + raise ConfigurationError(msg) + oidc_props = _OIDCProperties( - 
request_token_callback=request_token_callback, - provider_name=provider_name, + callback=callback, + human_callback=human_callback, + environment=environ, allowed_hosts=allowed_hosts, + token_resource=token_resource, + username=user, ) return MongoCredential(mech, "$external", user, passwd, oidc_props, _Cache()) @@ -217,15 +261,13 @@ def _authenticate_scram_start( nonce = standard_b64encode(os.urandom(32)) first_bare = b"n=" + user + b",r=" + nonce - cmd = SON( - [ - ("saslStart", 1), - ("mechanism", mechanism), - ("payload", Binary(b"n,," + first_bare)), - ("autoAuthorize", 1), - ("options", {"skipEmptyExchange": True}), - ] - ) + cmd = { + "saslStart": 1, + "mechanism": mechanism, + "payload": Binary(b"n,," + first_bare), + "autoAuthorize": 1, + "options": {"skipEmptyExchange": True}, + } return nonce, first_bare, cmd @@ -288,13 +330,11 @@ def _authenticate_scram(credentials: MongoCredential, conn: Connection, mechanis server_sig = standard_b64encode(_hmac(server_key, auth_msg, digestmod).digest()) - cmd = SON( - [ - ("saslContinue", 1), - ("conversationId", res["conversationId"]), - ("payload", Binary(client_final)), - ] - ) + cmd = { + "saslContinue": 1, + "conversationId": res["conversationId"], + "payload": Binary(client_final), + } res = conn.command(source, cmd) parsed = _parse_scram_response(res["payload"]) @@ -304,13 +344,11 @@ def _authenticate_scram(credentials: MongoCredential, conn: Connection, mechanis # A third empty challenge may be required if the server does not support # skipEmptyExchange: SERVER-44857. 
if not res["done"]: - cmd = SON( - [ - ("saslContinue", 1), - ("conversationId", res["conversationId"]), - ("payload", Binary(b"")), - ] - ) + cmd = { + "saslContinue": 1, + "conversationId": res["conversationId"], + "payload": Binary(b""), + } res = conn.command(source, cmd) if not res["done"]: raise OperationFailure("SASL conversation failed to complete.") @@ -415,14 +453,12 @@ def _authenticate_gssapi(credentials: MongoCredential, conn: Connection) -> None # Since mongo accepts base64 strings as the payload we don't # have to use bson.binary.Binary. payload = kerberos.authGSSClientResponse(ctx) - cmd = SON( - [ - ("saslStart", 1), - ("mechanism", "GSSAPI"), - ("payload", payload), - ("autoAuthorize", 1), - ] - ) + cmd = { + "saslStart": 1, + "mechanism": "GSSAPI", + "payload": payload, + "autoAuthorize": 1, + } response = conn.command("$external", cmd) # Limit how many times we loop to catch protocol / library issues @@ -433,13 +469,11 @@ def _authenticate_gssapi(credentials: MongoCredential, conn: Connection) -> None payload = kerberos.authGSSClientResponse(ctx) or "" - cmd = SON( - [ - ("saslContinue", 1), - ("conversationId", response["conversationId"]), - ("payload", payload), - ] - ) + cmd = { + "saslContinue": 1, + "conversationId": response["conversationId"], + "payload": payload, + } response = conn.command("$external", cmd) if result == kerberos.AUTH_GSS_COMPLETE: @@ -456,13 +490,11 @@ def _authenticate_gssapi(credentials: MongoCredential, conn: Connection) -> None raise OperationFailure("Unknown kerberos failure during GSS_Wrap step.") payload = kerberos.authGSSClientResponse(ctx) - cmd = SON( - [ - ("saslContinue", 1), - ("conversationId", response["conversationId"]), - ("payload", payload), - ] - ) + cmd = { + "saslContinue": 1, + "conversationId": response["conversationId"], + "payload": payload, + } conn.command("$external", cmd) finally: @@ -478,14 +510,12 @@ def _authenticate_plain(credentials: MongoCredential, conn: Connection) -> None: username 
= credentials.username password = credentials.password payload = (f"\x00{username}\x00{password}").encode() - cmd = SON( - [ - ("saslStart", 1), - ("mechanism", "PLAIN"), - ("payload", Binary(payload)), - ("autoAuthorize", 1), - ] - ) + cmd = { + "saslStart": 1, + "mechanism": "PLAIN", + "payload": Binary(payload), + "autoAuthorize": 1, + } conn.command(source, cmd) @@ -511,7 +541,7 @@ def _authenticate_mongo_cr(credentials: MongoCredential, conn: Connection) -> No key = _auth_key(nonce, username, password) # Actually authenticate - query = SON([("authenticate", 1), ("user", username), ("nonce", nonce), ("key", key)]) + query = {"authenticate": 1, "user": username, "nonce": nonce, "key": key} conn.command(source, query) @@ -537,6 +567,7 @@ def _authenticate_default(credentials: MongoCredential, conn: Connection) -> Non "MONGODB-CR": _authenticate_mongo_cr, "MONGODB-X509": _authenticate_x509, "MONGODB-AWS": _authenticate_aws, + "MONGODB-OIDC": _authenticate_oidc, # type:ignore[dict-item] "PLAIN": _authenticate_plain, "SCRAM-SHA-1": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-1"), "SCRAM-SHA-256": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-256"), @@ -588,7 +619,7 @@ def speculate_command(self) -> Optional[MutableMapping[str, Any]]: class _X509Context(_AuthContext): def speculate_command(self) -> MutableMapping[str, Any]: - cmd = SON([("authenticate", 1), ("mechanism", "MONGODB-X509")]) + cmd = {"authenticate": 1, "mechanism": "MONGODB-X509"} if self.credentials.username is not None: cmd["user"] = self.credentials.username return cmd @@ -597,7 +628,7 @@ def speculate_command(self) -> MutableMapping[str, Any]: class _OIDCContext(_AuthContext): def speculate_command(self) -> Optional[MutableMapping[str, Any]]: authenticator = _get_authenticator(self.credentials, self.address) - cmd = authenticator.auth_start_cmd(False) + cmd = authenticator.get_spec_auth_cmd() if cmd is None: return None cmd["db"] = self.credentials.source diff --git 
a/pymongo/auth_aws.py b/pymongo/auth_aws.py index 81f30c7ae3..042eee5a73 100644 --- a/pymongo/auth_aws.py +++ b/pymongo/auth_aws.py @@ -15,35 +15,10 @@ """MONGODB-AWS Authentication helpers.""" from __future__ import annotations -try: - import pymongo_auth_aws - from pymongo_auth_aws import AwsCredential, AwsSaslContext, PyMongoAuthAwsError - - _HAVE_MONGODB_AWS = True -except ImportError: - - class AwsSaslContext: # type: ignore - def __init__(self, credentials: MongoCredential): - pass - - _HAVE_MONGODB_AWS = False - -try: - from pymongo_auth_aws.auth import set_cached_credentials, set_use_cached_credentials - - # Enable credential caching. - set_use_cached_credentials(True) -except ImportError: - - def set_cached_credentials(_creds: Optional[AwsCredential]) -> None: - pass - - -from typing import TYPE_CHECKING, Any, Mapping, Optional, Type +from typing import TYPE_CHECKING, Any, Mapping, Type import bson from bson.binary import Binary -from bson.son import SON from pymongo.errors import ConfigurationError, OperationFailure if TYPE_CHECKING: @@ -52,61 +27,66 @@ def set_cached_credentials(_creds: Optional[AwsCredential]) -> None: from pymongo.pool import Connection -class _AwsSaslContext(AwsSaslContext): # type: ignore - # Dependency injection: - def binary_type(self) -> Type[Binary]: - """Return the bson.binary.Binary type.""" - return Binary - - def bson_encode(self, doc: Mapping[str, Any]) -> bytes: - """Encode a dictionary to BSON.""" - return bson.encode(doc) - - def bson_decode(self, data: _ReadableBuffer) -> Mapping[str, Any]: - """Decode BSON to a dictionary.""" - return bson.decode(data) - - def _authenticate_aws(credentials: MongoCredential, conn: Connection) -> None: """Authenticate using MONGODB-AWS.""" - if not _HAVE_MONGODB_AWS: + try: + import pymongo_auth_aws # type:ignore[import] + except ImportError as e: raise ConfigurationError( "MONGODB-AWS authentication requires pymongo-auth-aws: " "install with: python -m pip install 'pymongo[aws]'" - ) + ) 
from e + + # Delayed import. + from pymongo_auth_aws.auth import ( # type:ignore[import] + set_cached_credentials, + set_use_cached_credentials, + ) + + set_use_cached_credentials(True) if conn.max_wire_version < 9: raise ConfigurationError("MONGODB-AWS authentication requires MongoDB version 4.4 or later") + class AwsSaslContext(pymongo_auth_aws.AwsSaslContext): # type: ignore + # Dependency injection: + def binary_type(self) -> Type[Binary]: + """Return the bson.binary.Binary type.""" + return Binary + + def bson_encode(self, doc: Mapping[str, Any]) -> bytes: + """Encode a dictionary to BSON.""" + return bson.encode(doc) + + def bson_decode(self, data: _ReadableBuffer) -> Mapping[str, Any]: + """Decode BSON to a dictionary.""" + return bson.decode(data) + try: - ctx = _AwsSaslContext( - AwsCredential( + ctx = AwsSaslContext( + pymongo_auth_aws.AwsCredential( credentials.username, credentials.password, credentials.mechanism_properties.aws_session_token, ) ) client_payload = ctx.step(None) - client_first = SON( - [("saslStart", 1), ("mechanism", "MONGODB-AWS"), ("payload", client_payload)] - ) + client_first = {"saslStart": 1, "mechanism": "MONGODB-AWS", "payload": client_payload} server_first = conn.command("$external", client_first) res = server_first # Limit how many times we loop to catch protocol / library issues for _ in range(10): client_payload = ctx.step(res["payload"]) - cmd = SON( - [ - ("saslContinue", 1), - ("conversationId", server_first["conversationId"]), - ("payload", client_payload), - ] - ) + cmd = { + "saslContinue": 1, + "conversationId": server_first["conversationId"], + "payload": client_payload, + } res = conn.command("$external", cmd) if res["done"]: # SASL complete. break - except PyMongoAuthAwsError as exc: + except pymongo_auth_aws.PyMongoAuthAwsError as exc: # Clear the cached credentials if we hit a failure in auth. set_cached_credentials(None) # Convert to OperationFailure and include pymongo-auth-aws version. 
diff --git a/pymongo/auth_oidc.py b/pymongo/auth_oidc.py index ad9223809e..bfe2340f0a 100644 --- a/pymongo/auth_oidc.py +++ b/pymongo/auth_oidc.py @@ -15,32 +15,75 @@ """MONGODB-OIDC Authentication helpers.""" from __future__ import annotations +import abc +import os import threading +import time from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Any, Callable, Mapping, MutableMapping, Optional +from typing import TYPE_CHECKING, Any, Mapping, MutableMapping, Optional, Union +from urllib.parse import quote import bson from bson.binary import Binary -from bson.son import SON +from pymongo._azure_helpers import _get_azure_response +from pymongo._csot import remaining +from pymongo._gcp_helpers import _get_gcp_response from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.helpers import _AUTHENTICATION_FAILURE_CODE if TYPE_CHECKING: from pymongo.auth import MongoCredential from pymongo.pool import Connection +@dataclass +class OIDCIdPInfo: + issuer: str + clientId: Optional[str] = field(default=None) + requestScopes: Optional[list[str]] = field(default=None) + + +@dataclass +class OIDCCallbackContext: + timeout_seconds: float + username: str + version: int + refresh_token: Optional[str] = field(default=None) + idp_info: Optional[OIDCIdPInfo] = field(default=None) + + +@dataclass +class OIDCCallbackResult: + access_token: str + expires_in_seconds: Optional[float] = field(default=None) + refresh_token: Optional[str] = field(default=None) + + +class OIDCCallback(abc.ABC): + """A base class for defining OIDC callbacks.""" + + @abc.abstractmethod + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + """Convert the given BSON value into our own type.""" + + @dataclass class _OIDCProperties: - request_token_callback: Optional[Callable[..., dict]] - provider_name: Optional[str] - allowed_hosts: list[str] + callback: Optional[OIDCCallback] = field(default=None) + human_callback: Optional[OIDCCallback] = 
field(default=None) + environment: Optional[str] = field(default=None) + allowed_hosts: list[str] = field(default_factory=list) + token_resource: Optional[str] = field(default=None) + username: str = "" """Mechanism properties for MONGODB-OIDC authentication.""" TOKEN_BUFFER_MINUTES = 5 -CALLBACK_TIMEOUT_SECONDS = 5 * 60 +HUMAN_CALLBACK_TIMEOUT_SECONDS = 5 * 60 CALLBACK_VERSION = 1 +MACHINE_CALLBACK_TIMEOUT_SECONDS = 60 +TIME_BETWEEN_CALLS_SECONDS = 0.1 def _get_authenticator( @@ -54,7 +97,7 @@ def _get_authenticator( properties = credentials.mechanism_properties # Validate that the address is allowed. - if not properties.provider_name: + if not properties.environment: found = False allowed_hosts = properties.allowed_hosts for patt in allowed_hosts: @@ -72,28 +115,147 @@ def _get_authenticator( return credentials.cache.data +class _OIDCTestCallback(OIDCCallback): + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + token_file = os.environ.get("OIDC_TOKEN_FILE") + if not token_file: + raise RuntimeError( + 'MONGODB-OIDC with an "test" provider requires "OIDC_TOKEN_FILE" to be set' + ) + with open(token_file) as fid: + return OIDCCallbackResult(access_token=fid.read().strip()) + + +class _OIDCAzureCallback(OIDCCallback): + def __init__(self, token_resource: str) -> None: + self.token_resource = quote(token_resource) + + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + resp = _get_azure_response(self.token_resource, context.username, context.timeout_seconds) + return OIDCCallbackResult( + access_token=resp["access_token"], expires_in_seconds=resp["expires_in"] + ) + + +class _OIDCGCPCallback(OIDCCallback): + def __init__(self, token_resource: str) -> None: + self.token_resource = quote(token_resource) + + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + resp = _get_gcp_response(self.token_resource, context.timeout_seconds) + return OIDCCallbackResult(access_token=resp["access_token"]) + + @dataclass class 
_OIDCAuthenticator: username: str properties: _OIDCProperties refresh_token: Optional[str] = field(default=None) access_token: Optional[str] = field(default=None) - idp_info: Optional[dict] = field(default=None) + idp_info: Optional[OIDCIdPInfo] = field(default=None) token_gen_id: int = field(default=0) lock: threading.Lock = field(default_factory=threading.Lock) + last_call_time: float = field(default=0) - def get_current_token(self, use_callback: bool = True) -> Optional[str]: + def reauthenticate(self, conn: Connection) -> Optional[Mapping[str, Any]]: + """Handle a reauthenticate from the server.""" + # Invalidate the token for the connection. + self._invalidate(conn) + # Call the appropriate auth logic for the callback type. + if self.properties.callback: + return self._authenticate_machine(conn) + return self._authenticate_human(conn) + + def authenticate(self, conn: Connection) -> Optional[Mapping[str, Any]]: + """Handle an initial authenticate request.""" + # First handle speculative auth. + # If it succeeded, we are done. + ctx = conn.auth_ctx + if ctx and ctx.speculate_succeeded(): + resp = ctx.speculative_authenticate + if resp and resp["done"]: + conn.oidc_token_gen_id = self.token_gen_id + return resp + + # If spec auth failed, call the appropriate auth logic for the callback type. + # We cannot assume that the token is invalid, because a proxy may have been + # involved that stripped the speculative auth information. + if self.properties.callback: + return self._authenticate_machine(conn) + return self._authenticate_human(conn) + + def get_spec_auth_cmd(self) -> Optional[MutableMapping[str, Any]]: + """Get the appropriate speculative auth command.""" + if not self.access_token: + return None + return self._get_start_command({"jwt": self.access_token}) + + def _authenticate_machine(self, conn: Connection) -> Mapping[str, Any]: + # If there is a cached access token, try to authenticate with it. 
If + # authentication fails with error code 18, invalidate the access token, + # fetch a new access token, and try to authenticate again. If authentication + # fails for any other reason, raise the error to the user. + if self.access_token: + try: + return self._sasl_start_jwt(conn) + except OperationFailure as e: + if self._is_auth_error(e): + return self._authenticate_machine(conn) + raise + return self._sasl_start_jwt(conn) + + def _authenticate_human(self, conn: Connection) -> Optional[Mapping[str, Any]]: + # If we have a cached access token, try a JwtStepRequest. + # authentication fails with error code 18, invalidate the access token, + # and try to authenticate again. If authentication fails for any other + # reason, raise the error to the user. + if self.access_token: + try: + return self._sasl_start_jwt(conn) + except OperationFailure as e: + if self._is_auth_error(e): + return self._authenticate_human(conn) + raise + + # If we have a cached refresh token, try a JwtStepRequest with that. + # If authentication fails with error code 18, invalidate the access and + # refresh tokens, and try to authenticate again. If authentication fails for + # any other reason, raise the error to the user. + if self.refresh_token: + try: + return self._sasl_start_jwt(conn) + except OperationFailure as e: + if self._is_auth_error(e): + self.refresh_token = None + return self._authenticate_human(conn) + raise + + # Start a new Two-Step SASL conversation. + # Run a PrincipalStepRequest to get the IdpInfo. + cmd = self._get_start_command(None) + start_resp = self._run_command(conn, cmd) + # Attempt to authenticate with a JwtStepRequest. + return self._sasl_continue_jwt(conn, start_resp) + + def _get_access_token(self) -> Optional[str]: properties = self.properties + cb: Union[None, OIDCCallback] + resp: OIDCCallbackResult - # TODO: DRIVERS-2672, handle machine callback here as well. 
- cb = properties.request_token_callback if use_callback else None - cb_type = "human" + is_human = properties.human_callback is not None + if is_human and self.idp_info is None: + return None + + if properties.callback: + cb = properties.callback + if properties.human_callback: + cb = properties.human_callback prev_token = self.access_token if prev_token: return prev_token - if not use_callback and not prev_token: + if cb is None and not prev_token: return None if not prev_token and cb is not None: @@ -104,165 +266,92 @@ def get_current_token(self, use_callback: bool = True) -> Optional[str]: if new_token != prev_token: return new_token - # TODO: DRIVERS-2672 handle machine callback here. - if cb_type == "human": - context = { - "timeout_seconds": CALLBACK_TIMEOUT_SECONDS, - "version": CALLBACK_VERSION, - "refresh_token": self.refresh_token, - } - resp = cb(self.idp_info, context) - - self.validate_request_token_response(resp) - + # Ensure that we are waiting a min time between callback invocations. + delta = time.time() - self.last_call_time + if delta < TIME_BETWEEN_CALLS_SECONDS: + time.sleep(TIME_BETWEEN_CALLS_SECONDS - delta) + self.last_call_time = time.time() + + if is_human: + timeout = HUMAN_CALLBACK_TIMEOUT_SECONDS + assert self.idp_info is not None + else: + timeout = int(remaining() or MACHINE_CALLBACK_TIMEOUT_SECONDS) + context = OIDCCallbackContext( + timeout_seconds=timeout, + version=CALLBACK_VERSION, + refresh_token=self.refresh_token, + idp_info=self.idp_info, + username=self.properties.username, + ) + resp = cb.fetch(context) + if not isinstance(resp, OIDCCallbackResult): + raise ValueError("Callback result must be of type OIDCCallbackResult") + self.refresh_token = resp.refresh_token + self.access_token = resp.access_token self.token_gen_id += 1 return self.access_token - def validate_request_token_response(self, resp: Mapping[str, Any]) -> None: - # Validate callback return value. 
- if not isinstance(resp, dict): - raise ValueError("OIDC callback returned invalid result") - - if "access_token" not in resp: - raise ValueError("OIDC callback did not return an access_token") - - expected = ["access_token", "refresh_token", "expires_in_seconds"] - for key in resp: - if key not in expected: - raise ValueError(f'Unexpected field in callback result "{key}"') - - self.access_token = resp["access_token"] - self.refresh_token = resp.get("refresh_token") - - def principal_step_cmd(self) -> SON[str, Any]: - """Get a SASL start command with an optional principal name""" - # Send the SASL start with the optional principal name. - payload = {} - - principal_name = self.username - if principal_name: - payload["n"] = principal_name - - return SON( - [ - ("saslStart", 1), - ("mechanism", "MONGODB-OIDC"), - ("payload", Binary(bson.encode(payload))), - ("autoAuthorize", 1), - ] - ) - - def auth_start_cmd(self, use_callback: bool = True) -> Optional[SON[str, Any]]: - # TODO: DRIVERS-2672, check for provider_name in self.properties here. - if self.idp_info is None: - return self.principal_step_cmd() - - token = self.get_current_token(use_callback) - if not token: - return None - bin_payload = Binary(bson.encode({"jwt": token})) - return SON( - [ - ("saslStart", 1), - ("mechanism", "MONGODB-OIDC"), - ("payload", bin_payload), - ] - ) - - def run_command( - self, conn: Connection, cmd: MutableMapping[str, Any] - ) -> Optional[Mapping[str, Any]]: + def _run_command(self, conn: Connection, cmd: MutableMapping[str, Any]) -> Mapping[str, Any]: try: return conn.command("$external", cmd, no_reauth=True) # type: ignore[call-arg] - except OperationFailure: - self.access_token = None + except OperationFailure as e: + if self._is_auth_error(e): + self._invalidate(conn) raise - def reauthenticate(self, conn: Connection) -> Optional[Mapping[str, Any]]: - """Handle a reauthenticate from the server.""" - # First see if we have the a newer token on the authenticator. 
- prev_id = conn.oidc_token_gen_id or 0 - # If we've already changed tokens, make one optimistic attempt. - if (prev_id < self.token_gen_id) and self.access_token: - try: - return self.authenticate(conn) - except OperationFailure: - pass - + def _is_auth_error(self, err: Exception) -> bool: + if not isinstance(err, OperationFailure): + return False + return err.code == _AUTHENTICATION_FAILURE_CODE + + def _invalidate(self, conn: Connection) -> None: + # Ignore the invalidation if a token gen id is given and is less than our + # current token gen id. + token_gen_id = conn.oidc_token_gen_id or 0 + if token_gen_id is not None and token_gen_id < self.token_gen_id: + return self.access_token = None - # TODO: DRIVERS-2672, check for provider_name in self.properties here. - # If so, we clear the access token and return finish_auth. - - # Next see if the idp info has changed. - prev_idp_info = self.idp_info - self.idp_info = None - cmd = self.principal_step_cmd() - resp = self.run_command(conn, cmd) - assert resp is not None - server_resp: dict = bson.decode(resp["payload"]) - if "issuer" in server_resp: - self.idp_info = server_resp - - # Handle the case of changed idp info. - if self.idp_info != prev_idp_info: - self.access_token = None - self.refresh_token = None - - # If we have a refresh token, try using that. - if self.refresh_token: - try: - return self.finish_auth(resp, conn) - except OperationFailure: - self.refresh_token = None - # If that fails, try again without the refresh token. - return self.authenticate(conn) - - # If we don't have a refresh token, just try once. 
- return self.finish_auth(resp, conn) - - def authenticate(self, conn: Connection) -> Optional[Mapping[str, Any]]: - ctx = conn.auth_ctx - cmd = None - - if ctx and ctx.speculate_succeeded(): - resp = ctx.speculative_authenticate - else: - cmd = self.auth_start_cmd() - assert cmd is not None - resp = self.run_command(conn, cmd) - - assert resp is not None - if resp["done"]: - conn.oidc_token_gen_id = self.token_gen_id - return None - - server_resp: dict = bson.decode(resp["payload"]) - if "issuer" in server_resp: - self.idp_info = server_resp - - return self.finish_auth(resp, conn) + def _sasl_continue_jwt( + self, conn: Connection, start_resp: Mapping[str, Any] + ) -> Mapping[str, Any]: + self.access_token = None + self.refresh_token = None + start_payload: dict = bson.decode(start_resp["payload"]) + if "issuer" in start_payload: + self.idp_info = OIDCIdPInfo(**start_payload) + access_token = self._get_access_token() + conn.oidc_token_gen_id = self.token_gen_id + cmd = self._get_continue_command({"jwt": access_token}, start_resp) + return self._run_command(conn, cmd) - def finish_auth( - self, orig_resp: Mapping[str, Any], conn: Connection - ) -> Optional[Mapping[str, Any]]: - conversation_id = orig_resp["conversationId"] - token = self.get_current_token() + def _sasl_start_jwt(self, conn: Connection) -> Mapping[str, Any]: + access_token = self._get_access_token() conn.oidc_token_gen_id = self.token_gen_id - bin_payload = Binary(bson.encode({"jwt": token})) - cmd = SON( - [ - ("saslContinue", 1), - ("conversationId", conversation_id), - ("payload", bin_payload), - ] - ) - resp = self.run_command(conn, cmd) - assert resp is not None - if not resp["done"]: - raise OperationFailure("SASL conversation failed to complete.") - return resp + cmd = self._get_start_command({"jwt": access_token}) + return self._run_command(conn, cmd) + + def _get_start_command(self, payload: Optional[Mapping[str, Any]]) -> MutableMapping[str, Any]: + if payload is None: + principal_name = 
self.username + if principal_name: + payload = {"n": principal_name} + else: + payload = {} + bin_payload = Binary(bson.encode(payload)) + return {"saslStart": 1, "mechanism": "MONGODB-OIDC", "payload": bin_payload} + + def _get_continue_command( + self, payload: Mapping[str, Any], start_resp: Mapping[str, Any] + ) -> MutableMapping[str, Any]: + bin_payload = Binary(bson.encode(payload)) + return { + "saslContinue": 1, + "payload": bin_payload, + "conversationId": start_resp["conversationId"], + } def _authenticate_oidc( diff --git a/pymongo/bulk.py b/pymongo/bulk.py index 10e77d8b12..e1c46105f7 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -34,7 +34,6 @@ from bson.objectid import ObjectId from bson.raw_bson import RawBSONDocument -from bson.son import SON from pymongo import _csot, common from pymongo.client_session import ClientSession, _validate_session_write_concern from pymongo.common import ( @@ -89,18 +88,16 @@ def __init__(self, op_type: int) -> None: def index(self, idx: int) -> int: """Get the original index of an operation in this run. - :Parameters: - - `idx`: The Run index that maps to the original index. + :param idx: The Run index that maps to the original index. """ return self.index_map[idx] def add(self, original_index: int, operation: Any) -> None: """Add an operation to this Run instance. - :Parameters: - - `original_index`: The original index of this operation + :param original_index: The original index of this operation within a larger bulk operation. - - `operation`: The operation document. + :param operation: The operation document. """ self.index_map.append(original_index) self.ops.append(operation) @@ -152,8 +149,19 @@ def _merge_command( def _raise_bulk_write_error(full_result: _DocumentOut) -> NoReturn: """Raise a BulkWriteError from the full bulk api result.""" + # retryWrites on MMAPv1 should raise an actionable error. 
if full_result["writeErrors"]: full_result["writeErrors"].sort(key=lambda error: error["index"]) + err = full_result["writeErrors"][0] + code = err["code"] + msg = err["errmsg"] + if code == 20 and msg.startswith("Transaction numbers"): + errmsg = ( + "This MongoDB deployment does not support " + "retryable writes. Please add retryWrites=false " + "to your connection string." + ) + raise OperationFailure(errmsg, code, full_result) raise BulkWriteError(full_result) @@ -217,7 +225,7 @@ def add_update( upsert: bool = False, collation: Optional[Mapping[str, Any]] = None, array_filters: Optional[list[Mapping[str, Any]]] = None, - hint: Union[str, SON[str, Any], None] = None, + hint: Union[str, dict[str, Any], None] = None, ) -> None: """Create an update document and add it to the list of ops.""" validate_ok_for_update(update) @@ -244,11 +252,11 @@ def add_replace( replacement: Mapping[str, Any], upsert: bool = False, collation: Optional[Mapping[str, Any]] = None, - hint: Union[str, SON[str, Any], None] = None, + hint: Union[str, dict[str, Any], None] = None, ) -> None: """Create a replace document and add it to the list of ops.""" validate_ok_for_replace(replacement) - cmd = SON([("q", selector), ("u", replacement), ("multi", False), ("upsert", upsert)]) + cmd = {"q": selector, "u": replacement, "multi": False, "upsert": upsert} if collation is not None: self.uses_collation = True cmd["collation"] = collation @@ -262,10 +270,10 @@ def add_delete( selector: Mapping[str, Any], limit: int, collation: Optional[Mapping[str, Any]] = None, - hint: Union[str, SON[str, Any], None] = None, + hint: Union[str, dict[str, Any], None] = None, ) -> None: """Create a delete document and add it to the list of ops.""" - cmd = SON([("q", selector), ("limit", limit)]) + cmd = {"q": selector, "limit": limit} if collation is not None: self.uses_collation = True cmd["collation"] = collation @@ -352,7 +360,7 @@ def _execute_command( if last_run and (len(run.ops) - run.idx_offset) == 1: 
write_concern = final_write_concern or write_concern - cmd = SON([(cmd_name, self.collection.name), ("ordered", self.ordered)]) + cmd = {cmd_name: self.collection.name, "ordered": self.ordered} if self.comment: cmd["comment"] = self.comment _csot.apply_write_concern(cmd, write_concern) @@ -411,6 +419,7 @@ def execute_command( generator: Iterator[Any], write_concern: WriteConcern, session: Optional[ClientSession], + operation: str, ) -> dict[str, Any]: """Execute using write commands.""" # nModified is only reported for write commands, not legacy ops. @@ -440,7 +449,14 @@ def retryable_bulk( ) client = self.collection.database.client - client._retryable_write(self.is_retryable, retryable_bulk, session, bulk=self) + client._retryable_write( + self.is_retryable, + retryable_bulk, + session, + operation, + bulk=self, + operation_id=op_id, + ) if full_result["writeErrors"] or full_result["writeConcernErrors"]: _raise_bulk_write_error(full_result) @@ -471,13 +487,11 @@ def execute_op_msg_no_results(self, conn: Connection, generator: Iterator[Any]) ) while run.idx_offset < len(run.ops): - cmd = SON( - [ - (cmd_name, self.collection.name), - ("ordered", False), - ("writeConcern", {"w": 0}), - ] - ) + cmd = { + cmd_name: self.collection.name, + "ordered": False, + "writeConcern": {"w": 0}, + } conn.add_server_api(cmd) ops = islice(run.ops, run.idx_offset, None) # Run as many ops as possible. 
@@ -552,7 +566,12 @@ def execute_no_results( return self.execute_command_no_results(conn, generator, write_concern) return self.execute_op_msg_no_results(conn, generator) - def execute(self, write_concern: WriteConcern, session: Optional[ClientSession]) -> Any: + def execute( + self, + write_concern: WriteConcern, + session: Optional[ClientSession], + operation: str, + ) -> Any: """Execute operations.""" if not self.ops: raise InvalidOperation("No operations to execute") @@ -569,8 +588,8 @@ def execute(self, write_concern: WriteConcern, session: Optional[ClientSession]) client = self.collection.database.client if not write_concern.acknowledged: - with client._conn_for_writes(session) as connection: + with client._conn_for_writes(session, operation) as connection: self.execute_no_results(connection, generator, write_concern) return None else: - return self.execute_command(generator, write_concern, session) + return self.execute_command(generator, write_concern, session, operation) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 75cd169790..300bd88e92 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -36,6 +36,7 @@ OperationFailure, PyMongoError, ) +from pymongo.operations import _Op from pymongo.typings import _CollationIn, _DocumentType, _Pipeline # The change streams spec considers the following server errors from the @@ -178,8 +179,7 @@ def _change_stream_options(self) -> dict[str, Any]: options["startAfter"] = resume_token else: options["resumeAfter"] = resume_token - - if self._start_at_operation_time is not None: + elif self._start_at_operation_time is not None: options["startAtOperationTime"] = self._start_at_operation_time if self._show_expanded_events: @@ -244,7 +244,10 @@ def _run_aggregation_cmd( comment=self._comment, ) return self._client._retryable_read( - cmd.get_cursor, self._target._read_preference_for(session), session + cmd.get_cursor, + self._target._read_preference_for(session), + session, + 
operation=_Op.AGGREGATE, ) def _create_cursor(self) -> CommandCursor: @@ -356,8 +359,7 @@ def try_next(self) -> Optional[_DocumentType]: document is returned, otherwise, if the getMore returns no documents (because there have been no changes) then ``None`` is returned. - :Returns: - The next change document or ``None`` when no document is available + :return: The next change document or ``None`` when no document is available after running a single getMore or when the cursor is closed. .. versionadded:: 3.8 diff --git a/pymongo/client_options.py b/pymongo/client_options.py index d5f9cfcccd..9c745b11ef 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -19,8 +19,6 @@ from bson.codec_options import _parse_codec_options from pymongo import common -from pymongo.auth import MongoCredential, _build_credentials_tuple -from pymongo.common import validate_boolean from pymongo.compression_support import CompressionSettings from pymongo.errors import ConfigurationError from pymongo.monitoring import _EventListener, _EventListeners @@ -33,11 +31,12 @@ ) from pymongo.server_selectors import any_server_selector from pymongo.ssl_support import get_ssl_context -from pymongo.write_concern import WriteConcern +from pymongo.write_concern import WriteConcern, validate_boolean if TYPE_CHECKING: from bson.codec_options import CodecOptions - from pymongo.encryption import AutoEncryptionOpts + from pymongo.auth import MongoCredential + from pymongo.encryption_options import AutoEncryptionOpts from pymongo.pyopenssl_context import SSLContext from pymongo.topology_description import _ServerSelector @@ -49,6 +48,8 @@ def _parse_credentials( mechanism = options.get("authmechanism", "DEFAULT" if username else None) source = options.get("authsource") if username or mechanism: + from pymongo.auth import _build_credentials_tuple + return _build_credentials_tuple(mechanism, source, username, password, options, database) return None diff --git a/pymongo/client_session.py 
b/pymongo/client_session.py index 0aac770111..7dd1996afd 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -154,7 +154,6 @@ from bson.binary import Binary from bson.int64 import Int64 -from bson.son import SON from bson.timestamp import Timestamp from pymongo import _csot from pymongo.cursor import _ConnectionManager @@ -167,6 +166,7 @@ WTimeoutError, ) from pymongo.helpers import _RETRYABLE_ERROR_CODES +from pymongo.operations import _Op from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.server_type import SERVER_TYPE @@ -183,13 +183,12 @@ class SessionOptions: """Options for a new :class:`ClientSession`. - :Parameters: - - `causal_consistency` (optional): If True, read operations are causally + :param causal_consistency: If True, read operations are causally ordered within the session. Defaults to True when the ``snapshot`` option is ``False``. - - `default_transaction_options` (optional): The default + :param default_transaction_options: The default TransactionOptions to use for transactions started on this session. - - `snapshot` (optional): If True, then all reads performed using this + :param snapshot: If True, then all reads performed using this session will read from the same snapshot. This option is incompatible with ``causal_consistency=True``. Defaults to ``False``. @@ -247,21 +246,20 @@ def snapshot(self) -> Optional[bool]: class TransactionOptions: """Options for :meth:`ClientSession.start_transaction`. - :Parameters: - - `read_concern` (optional): The + :param read_concern: The :class:`~pymongo.read_concern.ReadConcern` to use for this transaction. If ``None`` (the default) the :attr:`read_preference` of the :class:`MongoClient` is used. - - `write_concern` (optional): The + :param write_concern: The :class:`~pymongo.write_concern.WriteConcern` to use for this transaction. 
If ``None`` (the default) the :attr:`read_preference` of the :class:`MongoClient` is used. - - `read_preference` (optional): The read preference to use. If + :param read_preference: The read preference to use. If ``None`` (the default) the :attr:`read_preference` of this :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` for options. Transactions which read must use :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. - - `max_commit_time_ms` (optional): The maximum amount of time to allow a + :param max_commit_time_ms: The maximum amount of time to allow a single commitTransaction command to run. This option is an alias for maxTimeMS option on the commitTransaction command. If ``None`` (the default) maxTimeMS is not used. @@ -517,9 +515,6 @@ def end_session(self) -> None: It is an error to use the session after the session has ended. """ - self._end_session(lock=True) - - def _end_session(self, lock: bool) -> None: if self._server_session is not None: try: if self.in_transaction: @@ -528,7 +523,7 @@ def _end_session(self, lock: bool) -> None: # is in the committed state when the session is discarded. 
self._unpin() finally: - self._client._return_server_session(self._server_session, lock) + self._client._return_server_session(self._server_session) self._server_session = None def _check_ended(self) -> None: @@ -539,7 +534,7 @@ def __enter__(self) -> ClientSession: return self def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: - self._end_session(lock=True) + self.end_session() @property def client(self) -> MongoClient: @@ -557,8 +552,15 @@ def options(self) -> SessionOptions: def session_id(self) -> Mapping[str, Any]: """A BSON document, the opaque server session identifier.""" self._check_ended() + self._materialize(self._client.topology_description.logical_session_timeout_minutes) return self._server_session.session_id + @property + def _transaction_id(self) -> Int64: + """The current transaction id for the underlying server session.""" + self._materialize(self._client.topology_description.logical_session_timeout_minutes) + return self._server_session.transaction_id + @property def cluster_time(self) -> Optional[ClusterTime]: """The cluster time returned by the last operation executed @@ -655,24 +657,22 @@ def callback(session, custom_arg, custom_kwarg=None): timeout is reached will be re-raised. Applications that desire a different timeout duration should not use this method. - :Parameters: - - `callback`: The callable ``callback`` to run inside a transaction. + :param callback: The callable ``callback`` to run inside a transaction. The callable must accept a single argument, this session. Note, under certain error conditions the callback may be run multiple times. - - `read_concern` (optional): The + :param read_concern: The :class:`~pymongo.read_concern.ReadConcern` to use for this transaction. - - `write_concern` (optional): The + :param write_concern: The :class:`~pymongo.write_concern.WriteConcern` to use for this transaction. 
- - `read_preference` (optional): The read preference to use for this + :param read_preference: The read preference to use for this transaction. If ``None`` (the default) the :attr:`read_preference` of this :class:`Database` is used. See :mod:`~pymongo.read_preferences` for options. - :Returns: - The return value of the ``callback``. + :return: The return value of the ``callback``. .. versionadded:: 3.9 """ @@ -833,8 +833,7 @@ def abort_transaction(self) -> None: def _finish_transaction_with_retry(self, command_name: str) -> dict[str, Any]: """Run commit or abort with one retry after any retryable error. - :Parameters: - - `command_name`: Either "commitTransaction" or "abortTransaction". + :param command_name: Either "commitTransaction" or "abortTransaction". """ def func( @@ -842,14 +841,14 @@ def func( ) -> dict[str, Any]: return self._finish_transaction(conn, command_name) - return self._client._retry_internal(func, self, None, retryable=True) + return self._client._retry_internal(func, self, None, retryable=True, operation=_Op.ABORT) def _finish_transaction(self, conn: Connection, command_name: str) -> dict[str, Any]: self._transaction.attempt += 1 opts = self._transaction.opts assert opts wc = opts.write_concern - cmd = SON([(command_name, 1)]) + cmd = {command_name: 1} if command_name == "commitTransaction": if opts.max_commit_time_ms and _csot.get_timeout() is None: cmd["maxTimeMS"] = opts.max_commit_time_ms @@ -882,8 +881,7 @@ def _advance_cluster_time(self, cluster_time: Optional[Mapping[str, Any]]) -> No def advance_cluster_time(self, cluster_time: Mapping[str, Any]) -> None: """Update the cluster time for this session. - :Parameters: - - `cluster_time`: The + :param cluster_time: The :data:`~pymongo.client_session.ClientSession.cluster_time` from another `ClientSession` instance. 
""" @@ -904,8 +902,7 @@ def _advance_operation_time(self, operation_time: Optional[Timestamp]) -> None: def advance_operation_time(self, operation_time: Timestamp) -> None: """Update the operation time for this session. - :Parameters: - - `operation_time`: The + :param operation_time: The :data:`~pymongo.client_session.ClientSession.operation_time` from another `ClientSession` instance. """ @@ -973,10 +970,12 @@ def _txn_read_preference(self) -> Optional[_ServerMode]: return self._transaction.opts.read_preference return None - def _materialize(self) -> None: + def _materialize(self, logical_session_timeout_minutes: Optional[int] = None) -> None: if isinstance(self._server_session, _EmptyServerSession): old = self._server_session - self._server_session = self._client._topology.get_server_session() + self._server_session = self._client._topology.get_server_session( + logical_session_timeout_minutes + ) if old.started_retryable_write: self._server_session.inc_transaction_id() @@ -987,8 +986,12 @@ def _apply_to( read_preference: _ServerMode, conn: Connection, ) -> None: + if not conn.supports_sessions: + if not self._implicit: + raise ConfigurationError("Sessions are not supported by this MongoDB deployment") + return self._check_ended() - self._materialize() + self._materialize(conn.logical_session_timeout_minutes) if self.options.snapshot: self._update_read_concern(command, conn) @@ -1070,7 +1073,10 @@ def mark_dirty(self) -> None: """ self.dirty = True - def timed_out(self, session_timeout_minutes: float) -> bool: + def timed_out(self, session_timeout_minutes: Optional[int]) -> bool: + if session_timeout_minutes is None: + return False + idle_seconds = time.monotonic() - self.last_use # Timed out if we have less than a minute to live. @@ -1088,7 +1094,7 @@ def inc_transaction_id(self) -> None: class _ServerSessionPool(collections.deque): """Pool of _ServerSession objects. - This class is not thread-safe, access it while holding the Topology lock. 
+ This class is thread-safe. """ def __init__(self, *args: Any, **kwargs: Any): @@ -1101,11 +1107,14 @@ def reset(self) -> None: def pop_all(self) -> list[_ServerSession]: ids = [] - while self: - ids.append(self.pop().session_id) + while True: + try: + ids.append(self.pop().session_id) + except IndexError: + break return ids - def get_server_session(self, session_timeout_minutes: float) -> _ServerSession: + def get_server_session(self, session_timeout_minutes: Optional[int]) -> _ServerSession: # Although the Driver Sessions Spec says we only clear stale sessions # in return_server_session, PyMongo can't take a lock when returning # sessions from a __del__ method (like in Cursor.__die), so it can't @@ -1114,33 +1123,30 @@ def get_server_session(self, session_timeout_minutes: float) -> _ServerSession: self._clear_stale(session_timeout_minutes) # The most recently used sessions are on the left. - while self: - s = self.popleft() + while True: + try: + s = self.popleft() + except IndexError: + break if not s.timed_out(session_timeout_minutes): return s return _ServerSession(self.generation) - def return_server_session( - self, server_session: _ServerSession, session_timeout_minutes: Optional[float] - ) -> None: - if session_timeout_minutes is not None: - self._clear_stale(session_timeout_minutes) - if server_session.timed_out(session_timeout_minutes): - return - self.return_server_session_no_lock(server_session) - - def return_server_session_no_lock(self, server_session: _ServerSession) -> None: + def return_server_session(self, server_session: _ServerSession) -> None: # Discard sessions from an old pool to avoid duplicate sessions in the # child process after a fork. if server_session.generation == self.generation and not server_session.dirty: self.appendleft(server_session) - def _clear_stale(self, session_timeout_minutes: float) -> None: + def _clear_stale(self, session_timeout_minutes: Optional[int]) -> None: # Clear stale sessions. 
The least recently used are on the right. - while self: - if self[-1].timed_out(session_timeout_minutes): - self.pop() - else: + while True: + try: + s = self.pop() + except IndexError: + break + if not s.timed_out(session_timeout_minutes): + self.append(s) # The remaining sessions also haven't timed out. break diff --git a/pymongo/collation.py b/pymongo/collation.py index e940868e59..971628f4ec 100644 --- a/pymongo/collation.py +++ b/pymongo/collation.py @@ -21,6 +21,7 @@ from typing import Any, Mapping, Optional, Union from pymongo import common +from pymongo.write_concern import validate_boolean class CollationStrength: @@ -96,22 +97,21 @@ class CollationCaseFirst: class Collation: """Collation - :Parameters: - - `locale`: (string) The locale of the collation. This should be a string + :param locale: (string) The locale of the collation. This should be a string that identifies an `ICU locale ID` exactly. For example, ``en_US`` is valid, but ``en_us`` and ``en-US`` are not. Consult the MongoDB documentation for a list of supported locales. - - `caseLevel`: (optional) If ``True``, turn on case sensitivity if + :param caseLevel: (optional) If ``True``, turn on case sensitivity if `strength` is 1 or 2 (case sensitivity is implied if `strength` is greater than 2). Defaults to ``False``. - - `caseFirst`: (optional) Specify that either uppercase or lowercase + :param caseFirst: (optional) Specify that either uppercase or lowercase characters take precedence. Must be one of the following values: * :data:`~CollationCaseFirst.UPPER` * :data:`~CollationCaseFirst.LOWER` * :data:`~CollationCaseFirst.OFF` (the default) - - `strength`: (optional) Specify the comparison strength. This is also + :param strength: Specify the comparison strength. This is also known as the ICU comparison level. 
This must be one of the following values: @@ -125,27 +125,27 @@ class Collation: `strength` of :data:`~CollationStrength.SECONDARY` differentiates characters based both on the unadorned base character and its accents. - - `numericOrdering`: (optional) If ``True``, order numbers numerically + :param numericOrdering: If ``True``, order numbers numerically instead of in collation order (defaults to ``False``). - - `alternate`: (optional) Specify whether spaces and punctuation are + :param alternate: Specify whether spaces and punctuation are considered base characters. This must be one of the following values: * :data:`~CollationAlternate.NON_IGNORABLE` (the default) * :data:`~CollationAlternate.SHIFTED` - - `maxVariable`: (optional) When `alternate` is + :param maxVariable: When `alternate` is :data:`~CollationAlternate.SHIFTED`, this option specifies what characters may be ignored. This must be one of the following values: * :data:`~CollationMaxVariable.PUNCT` (the default) * :data:`~CollationMaxVariable.SPACE` - - `normalization`: (optional) If ``True``, normalizes text into Unicode + :param normalization: If ``True``, normalizes text into Unicode NFD. Defaults to ``False``. - - `backwards`: (optional) If ``True``, accents on characters are + :param backwards: If ``True``, accents on characters are considered from the back of the word to the front, as it is done in some French dictionary ordering traditions. Defaults to ``False``. - - `kwargs`: (optional) Keyword arguments supplying any additional options + :param kwargs: Keyword arguments supplying any additional options to be sent with this Collation object. .. 
versionadded: 3.4 @@ -170,13 +170,13 @@ def __init__( locale = common.validate_string("locale", locale) self.__document: dict[str, Any] = {"locale": locale} if caseLevel is not None: - self.__document["caseLevel"] = common.validate_boolean("caseLevel", caseLevel) + self.__document["caseLevel"] = validate_boolean("caseLevel", caseLevel) if caseFirst is not None: self.__document["caseFirst"] = common.validate_string("caseFirst", caseFirst) if strength is not None: self.__document["strength"] = common.validate_integer("strength", strength) if numericOrdering is not None: - self.__document["numericOrdering"] = common.validate_boolean( + self.__document["numericOrdering"] = validate_boolean( "numericOrdering", numericOrdering ) if alternate is not None: @@ -184,11 +184,9 @@ def __init__( if maxVariable is not None: self.__document["maxVariable"] = common.validate_string("maxVariable", maxVariable) if normalization is not None: - self.__document["normalization"] = common.validate_boolean( - "normalization", normalization - ) + self.__document["normalization"] = validate_boolean("normalization", normalization) if backwards is not None: - self.__document["backwards"] = common.validate_boolean("backwards", backwards) + self.__document["backwards"] = validate_boolean("backwards", backwards) self.__document.update(kwargs) @property diff --git a/pymongo/collection.py b/pymongo/collection.py index 7f4354e7d1..ddfe9f1df8 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -70,7 +70,9 @@ UpdateOne, _IndexKeyHint, _IndexList, + _Op, ) +from pymongo.read_concern import DEFAULT_READ_CONCERN, ReadConcern from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.results import ( BulkWriteResult, @@ -80,7 +82,7 @@ UpdateResult, ) from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline -from pymongo.write_concern import WriteConcern +from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern, validate_boolean T = 
TypeVar("T") @@ -112,14 +114,11 @@ class ReturnDocument: if TYPE_CHECKING: - - import bson from pymongo.aggregation import _AggregationCommand from pymongo.client_session import ClientSession from pymongo.collation import Collation from pymongo.database import Database from pymongo.pool import Connection - from pymongo.read_concern import ReadConcern from pymongo.server import Server @@ -154,29 +153,28 @@ def __init__( use. The optional ``session`` argument is *only* used for the ``create`` command, it is not associated with the collection afterward. - :Parameters: - - `database`: the database to get a collection from - - `name`: the name of the collection to get - - `create` (optional): if ``True``, force collection + :param database: the database to get a collection from + :param name: the name of the collection to get + :param create: if ``True``, force collection creation even without options being set - - `codec_options` (optional): An instance of + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. If ``None`` (the default) database.codec_options is used. - - `read_preference` (optional): The read preference to use. If + :param read_preference: The read preference to use. If ``None`` (the default) database.read_preference is used. - - `write_concern` (optional): An instance of + :param write_concern: An instance of :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the default) database.write_concern is used. - - `read_concern` (optional): An instance of + :param read_concern: An instance of :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the default) database.read_concern is used. - - `collation` (optional): An instance of + :param collation: An instance of :class:`~pymongo.collation.Collation`. If a collation is provided, it will be passed to the create collection command. 
- - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession` that is used with the create collection command - - `**kwargs` (optional): additional keyword arguments will + :param kwargs: additional keyword arguments will be passed as options for the create collection command .. versionchanged:: 4.2 @@ -254,13 +252,10 @@ def __init__( else: self.__create(name, kwargs, collation, session) - def _conn_for_reads( - self, session: ClientSession - ) -> ContextManager[tuple[Connection, _ServerMode]]: - return self.__database.client._conn_for_reads(self._read_preference_for(session), session) - - def _conn_for_writes(self, session: Optional[ClientSession]) -> ContextManager[Connection]: - return self.__database.client._conn_for_writes(session) + def _conn_for_writes( + self, session: Optional[ClientSession], operation: str + ) -> ContextManager[Connection]: + return self.__database.client._conn_for_writes(session, operation) def _command( self, @@ -279,30 +274,28 @@ def _command( ) -> Mapping[str, Any]: """Internal command helper. - :Parameters: - - `conn` - A Connection instance. - - `command` - The command itself, as a :class:`~bson.son.SON` instance. - - `read_preference` (optional) - The read preference to use. - - `codec_options` (optional) - An instance of + :param conn` - A Connection instance. + :param command` - The command itself, as a :class:`~bson.son.SON` instance. + :param read_preference` (optional) - The read preference to use. + :param codec_options` (optional) - An instance of :class:`~bson.codec_options.CodecOptions`. - - `check`: raise OperationFailure if there are errors - - `allowable_errors`: errors to ignore if `check` is True - - `read_concern` (optional) - An instance of + :param check: raise OperationFailure if there are errors + :param allowable_errors: errors to ignore if `check` is True + :param read_concern` (optional) - An instance of :class:`~pymongo.read_concern.ReadConcern`. 
- - `write_concern`: An instance of + :param write_concern: An instance of :class:`~pymongo.write_concern.WriteConcern`. - - `collation` (optional) - An instance of + :param collation` (optional) - An instance of :class:`~pymongo.collation.Collation`. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `retryable_write` (optional): True if this command is a retryable + :param retryable_write: True if this command is a retryable write. - - `user_fields` (optional): Response fields that should be decoded + :param user_fields: Response fields that should be decoded using the TypeDecoders from codec_options, passed to bson._decode_all_selective. - :Returns: - The result document. + :return: The result document. """ with self.__database.client._tmp_session(session) as s: return conn.command( @@ -332,7 +325,7 @@ def __create( qev2_required: bool = False, ) -> None: """Sends a create command with the given options.""" - cmd: SON[str, Any] = SON([("create", name)]) + cmd: dict[str, Any] = {"create": name} if encrypted_fields: cmd["encryptedFields"] = encrypted_fields @@ -340,7 +333,7 @@ def __create( if "size" in options: options["size"] = float(options["size"]) cmd.update(options) - with self._conn_for_writes(session) as conn: + with self._conn_for_writes(session, operation=_Op.CREATE) as conn: if qev2_required and conn.max_wire_version < 21: raise ConfigurationError( "Driver support of Queryable Encryption is incompatible with server. " @@ -362,8 +355,7 @@ def __getattr__(self, name: str) -> Collection[_DocumentType]: Raises InvalidName if an invalid collection name is used. 
- :Parameters: - - `name`: the name of the collection to get + :param name: the name of the collection to get """ if name.startswith("_"): full_name = f"{self.__name}.{name}" @@ -427,7 +419,7 @@ def database(self) -> Database[_DocumentType]: def with_options( self, - codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional[ReadConcern] = None, @@ -443,20 +435,19 @@ def with_options( >>> coll2.read_preference Secondary(tag_sets=None) - :Parameters: - - `codec_options` (optional): An instance of + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. If ``None`` (the default) the :attr:`codec_options` of this :class:`Collection` is used. - - `read_preference` (optional): The read preference to use. If + :param read_preference: The read preference to use. If ``None`` (the default) the :attr:`read_preference` of this :class:`Collection` is used. See :mod:`~pymongo.read_preferences` for options. - - `write_concern` (optional): An instance of + :param write_concern: An instance of :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the default) the :attr:`write_concern` of this :class:`Collection` is used. - - `read_concern` (optional): An instance of + :param read_concern: An instance of :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the default) the :attr:`read_concern` of this :class:`Collection` is used. @@ -517,27 +508,25 @@ def bulk_write( {'y': 1, '_id': ObjectId('54f62ee2fba5226811f634f1')} {'z': 1, '_id': ObjectId('54f62ee28891e756a6e1abd5')} - :Parameters: - - `requests`: A list of write operations (see examples above). - - `ordered` (optional): If ``True`` (the default) requests will be + :param requests: A list of write operations (see examples above). 
+ :param ordered: If ``True`` (the default) requests will be performed on the server serially, in the order provided. If an error occurs all remaining operations are aborted. If ``False`` requests will be performed on the server in arbitrary order, possibly in parallel, and all operations will be attempted. - - `bypass_document_validation`: (optional) If ``True``, allows the + :param bypass_document_validation: (optional) If ``True``, allows the write to opt-out of document level validation. Default is ``False``. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `let` (optional): Map of parameter names and values. Values must be + :param let: Map of parameter names and values. Values must be constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). - :Returns: - An instance of :class:`~pymongo.results.BulkWriteResult`. + :return: An instance of :class:`~pymongo.results.BulkWriteResult`. .. 
seealso:: :ref:`writes-and-ids` @@ -566,7 +555,7 @@ def bulk_write( raise TypeError(f"{request!r} is not a valid request") from None write_concern = self._write_concern_for(session) - bulk_api_result = blk.execute(write_concern, session) + bulk_api_result = blk.execute(write_concern, session, _Op.INSERT) if bulk_api_result is not None: return BulkWriteResult(bulk_api_result, True) return BulkWriteResult({}, False) @@ -584,7 +573,7 @@ def _insert_one( """Internal helper for inserting a single document.""" write_concern = write_concern or self.write_concern acknowledged = write_concern.acknowledged - command = SON([("insert", self.name), ("ordered", ordered), ("documents", [doc])]) + command = {"insert": self.name, "ordered": ordered, "documents": [doc]} if comment is not None: command["comment"] = comment @@ -606,7 +595,9 @@ def _insert_command( _check_write_command_response(result) - self.__database.client._retryable_write(acknowledged, _insert_command, session) + self.__database.client._retryable_write( + acknowledged, _insert_command, session, operation=_Op.INSERT + ) if not isinstance(doc, RawBSONDocument): return doc.get("_id") @@ -629,20 +620,18 @@ def insert_one( >>> db.test.find_one({'x': 1}) {'x': 1, '_id': ObjectId('54f112defba522406c9cc208')} - :Parameters: - - `document`: The document to insert. Must be a mutable mapping + :param document: The document to insert. Must be a mutable mapping type. If the document does not have an _id field one will be added automatically. - - `bypass_document_validation`: (optional) If ``True``, allows the + :param bypass_document_validation: (optional) If ``True``, allows the write to opt-out of document level validation. Default is ``False``. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. 
- :Returns: - - An instance of :class:`~pymongo.results.InsertOneResult`. + :return: - An instance of :class:`~pymongo.results.InsertOneResult`. .. seealso:: :ref:`writes-and-ids` @@ -697,23 +686,21 @@ def insert_many( >>> db.test.count_documents({}) 2 - :Parameters: - - `documents`: A iterable of documents to insert. - - `ordered` (optional): If ``True`` (the default) documents will be + :param documents: A iterable of documents to insert. + :param ordered: If ``True`` (the default) documents will be inserted on the server serially, in the order provided. If an error occurs all remaining inserts are aborted. If ``False``, documents will be inserted on the server in arbitrary order, possibly in parallel, and all document inserts will be attempted. - - `bypass_document_validation`: (optional) If ``True``, allows the + :param bypass_document_validation: (optional) If ``True``, allows the write to opt-out of document level validation. Default is ``False``. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - :Returns: - An instance of :class:`~pymongo.results.InsertManyResult`. + :return: An instance of :class:`~pymongo.results.InsertManyResult`. .. 
seealso:: :ref:`writes-and-ids` @@ -752,7 +739,7 @@ def gen() -> Iterator[tuple[int, Mapping[str, Any]]]: write_concern = self._write_concern_for(session) blk = _Bulk(self, ordered, bypass_document_validation, comment=comment) blk.ops = list(gen()) - blk.execute(write_concern, session=session) + blk.execute(write_concern, session, _Op.INSERT) return InsertManyResult(inserted_ids, write_concern.acknowledged) def _update( @@ -775,13 +762,16 @@ def _update( comment: Optional[Any] = None, ) -> Optional[Mapping[str, Any]]: """Internal update / replace helper.""" - common.validate_boolean("upsert", upsert) + validate_boolean("upsert", upsert) collation = validate_collation_or_none(collation) write_concern = write_concern or self.write_concern acknowledged = write_concern.acknowledged - update_doc: SON[str, Any] = SON( - [("q", criteria), ("u", document), ("multi", multi), ("upsert", upsert)] - ) + update_doc: dict[str, Any] = { + "q": criteria, + "u": document, + "multi": multi, + "upsert": upsert, + } if collation is not None: if not acknowledged: raise ConfigurationError("Collation is unsupported for unacknowledged writes.") @@ -800,7 +790,7 @@ def _update( if not isinstance(hint, str): hint = helpers._index_document(hint) update_doc["hint"] = hint - command = SON([("update", self.name), ("ordered", ordered), ("updates", [update_doc])]) + command = {"update": self.name, "ordered": ordered, "updates": [update_doc]} if let is not None: common.validate_is_mapping("let", let) command["let"] = let @@ -841,6 +831,7 @@ def _update_retryable( self, criteria: Mapping[str, Any], document: Union[Mapping[str, Any], _Pipeline], + operation: str, upsert: bool = False, multi: bool = False, write_concern: Optional[WriteConcern] = None, @@ -879,7 +870,10 @@ def _update( ) return self.__database.client._retryable_write( - (write_concern or self.write_concern).acknowledged and not multi, _update, session + (write_concern or self.write_concern).acknowledged and not multi, + _update, + 
session, + operation, ) def replace_one( @@ -923,32 +917,30 @@ def replace_one( >>> db.test.find_one({'x': 1}) {'x': 1, '_id': ObjectId('54f11e5c8891e756a6e1abd4')} - :Parameters: - - `filter`: A query that matches the document to replace. - - `replacement`: The new document. - - `upsert` (optional): If ``True``, perform an insert if no documents + :param filter: A query that matches the document to replace. + :param replacement: The new document. + :param upsert: If ``True``, perform an insert if no documents match the filter. - - `bypass_document_validation`: (optional) If ``True``, allows the + :param bypass_document_validation: (optional) If ``True``, allows the write to opt-out of document level validation. Default is ``False``. - - `collation` (optional): An instance of + :param collation: An instance of :class:`~pymongo.collation.Collation`. - - `hint` (optional): An index to use to support the query + :param hint: An index to use to support the query predicate specified either by its string name, or in the same format as passed to :meth:`~pymongo.collection.Collection.create_index` (e.g. ``[('field', ASCENDING)]``). This option is only supported on MongoDB 4.2 and above. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `let` (optional): Map of parameter names and values. Values must be + :param let: Map of parameter names and values. Values must be constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - :Returns: - - An instance of :class:`~pymongo.results.UpdateResult`. + :return: - An instance of :class:`~pymongo.results.UpdateResult`. .. versionchanged:: 4.1 Added ``let`` parameter. 
@@ -973,6 +965,7 @@ def replace_one( self._update_retryable( filter, replacement, + _Op.UPDATE, upsert, write_concern=write_concern, bypass_doc_val=bypass_document_validation, @@ -1031,35 +1024,33 @@ def update_one( >>> db.test.find_one(result.upserted_id) {'_id': ObjectId('626a678eeaa80587d4bb3fb7'), 'x': -7} - :Parameters: - - `filter`: A query that matches the document to update. - - `update`: The modifications to apply. - - `upsert` (optional): If ``True``, perform an insert if no documents + :param filter: A query that matches the document to update. + :param update: The modifications to apply. + :param upsert: If ``True``, perform an insert if no documents match the filter. - - `bypass_document_validation`: (optional) If ``True``, allows the + :param bypass_document_validation: (optional) If ``True``, allows the write to opt-out of document level validation. Default is ``False``. - - `collation` (optional): An instance of + :param collation: An instance of :class:`~pymongo.collation.Collation`. - - `array_filters` (optional): A list of filters specifying which + :param array_filters: A list of filters specifying which array elements an update should apply. - - `hint` (optional): An index to use to support the query + :param hint: An index to use to support the query predicate specified either by its string name, or in the same format as passed to :meth:`~pymongo.collection.Collection.create_index` (e.g. ``[('field', ASCENDING)]``). This option is only supported on MongoDB 4.2 and above. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `let` (optional): Map of parameter names and values. Values must be + :param let: Map of parameter names and values. Values must be constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). 
- - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - :Returns: - - An instance of :class:`~pymongo.results.UpdateResult`. + :return: - An instance of :class:`~pymongo.results.UpdateResult`. .. versionchanged:: 4.1 Added ``let`` parameter. @@ -1086,6 +1077,7 @@ def update_one( self._update_retryable( filter, update, + _Op.UPDATE, upsert, write_concern=write_concern, bypass_doc_val=bypass_document_validation, @@ -1132,35 +1124,33 @@ def update_many( {'x': 4, '_id': 1} {'x': 4, '_id': 2} - :Parameters: - - `filter`: A query that matches the documents to update. - - `update`: The modifications to apply. - - `upsert` (optional): If ``True``, perform an insert if no documents + :param filter: A query that matches the documents to update. + :param update: The modifications to apply. + :param upsert: If ``True``, perform an insert if no documents match the filter. - - `bypass_document_validation` (optional): If ``True``, allows the + :param bypass_document_validation: If ``True``, allows the write to opt-out of document level validation. Default is ``False``. - - `collation` (optional): An instance of + :param collation: An instance of :class:`~pymongo.collation.Collation`. - - `array_filters` (optional): A list of filters specifying which + :param array_filters: A list of filters specifying which array elements an update should apply. - - `hint` (optional): An index to use to support the query + :param hint: An index to use to support the query predicate specified either by its string name, or in the same format as passed to :meth:`~pymongo.collection.Collection.create_index` (e.g. ``[('field', ASCENDING)]``). This option is only supported on MongoDB 4.2 and above. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `let` (optional): Map of parameter names and values. Values must be + :param let: Map of parameter names and values. 
Values must be constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - :Returns: - - An instance of :class:`~pymongo.results.UpdateResult`. + :return: - An instance of :class:`~pymongo.results.UpdateResult`. .. versionchanged:: 4.1 Added ``let`` parameter. @@ -1187,6 +1177,7 @@ def update_many( self._update_retryable( filter, update, + _Op.UPDATE, upsert, multi=True, write_concern=write_concern, @@ -1209,12 +1200,11 @@ def drop( ) -> None: """Alias for :meth:`~pymongo.database.Database.drop_collection`. - :Parameters: - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `encrypted_fields`: **(BETA)** Document that describes the encrypted fields for + :param encrypted_fields: **(BETA)** Document that describes the encrypted fields for Queryable Encryption. 
The following two calls are equivalent: @@ -1264,7 +1254,7 @@ def _delete( common.validate_is_mapping("filter", criteria) write_concern = write_concern or self.write_concern acknowledged = write_concern.acknowledged - delete_doc = SON([("q", criteria), ("limit", int(not multi))]) + delete_doc = {"q": criteria, "limit": int(not multi)} collation = validate_collation_or_none(collation) if collation is not None: if not acknowledged: @@ -1279,7 +1269,7 @@ def _delete( if not isinstance(hint, str): hint = helpers._index_document(hint) delete_doc["hint"] = hint - command = SON([("delete", self.name), ("ordered", ordered), ("deletes", [delete_doc])]) + command = {"delete": self.name, "ordered": ordered, "deletes": [delete_doc]} if let is not None: common.validate_is_document_type("let", let) @@ -1335,7 +1325,10 @@ def _delete( ) return self.__database.client._retryable_write( - (write_concern or self.write_concern).acknowledged and not multi, _delete, session + (write_concern or self.write_concern).acknowledged and not multi, + _delete, + session, + operation=_Op.DELETE, ) def delete_one( @@ -1357,27 +1350,25 @@ def delete_one( >>> db.test.count_documents({'x': 1}) 2 - :Parameters: - - `filter`: A query that matches the document to delete. - - `collation` (optional): An instance of + :param filter: A query that matches the document to delete. + :param collation: An instance of :class:`~pymongo.collation.Collation`. - - `hint` (optional): An index to use to support the query + :param hint: An index to use to support the query predicate specified either by its string name, or in the same format as passed to :meth:`~pymongo.collection.Collection.create_index` (e.g. ``[('field', ASCENDING)]``). This option is only supported on MongoDB 4.4 and above. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `let` (optional): Map of parameter names and values. Values must be + :param let: Map of parameter names and values. 
Values must be constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - :Returns: - - An instance of :class:`~pymongo.results.DeleteResult`. + :return: - An instance of :class:`~pymongo.results.DeleteResult`. .. versionchanged:: 4.1 Added ``let`` parameter. @@ -1424,27 +1415,25 @@ def delete_many( >>> db.test.count_documents({'x': 1}) 0 - :Parameters: - - `filter`: A query that matches the documents to delete. - - `collation` (optional): An instance of + :param filter: A query that matches the documents to delete. + :param collation: An instance of :class:`~pymongo.collation.Collation`. - - `hint` (optional): An index to use to support the query + :param hint: An index to use to support the query predicate specified either by its string name, or in the same format as passed to :meth:`~pymongo.collection.Collection.create_index` (e.g. ``[('field', ASCENDING)]``). This option is only supported on MongoDB 4.4 and above. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `let` (optional): Map of parameter names and values. Values must be + :param let: Map of parameter names and values. Values must be constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - :Returns: - - An instance of :class:`~pymongo.results.DeleteResult`. + :return: - An instance of :class:`~pymongo.results.DeleteResult`. .. versionchanged:: 4.1 Added ``let`` parameter. 
@@ -1485,18 +1474,18 @@ def find_one( The :meth:`find_one` method obeys the :attr:`read_preference` of this :class:`Collection`. - :Parameters: - - - `filter` (optional): a dictionary specifying + :param filter: a dictionary specifying the query to be performed OR any other type to be used as the value for a query for ``"_id"``. - - `*args` (optional): any additional positional arguments + :param args: any additional positional arguments are the same as the arguments to :meth:`find`. - - `**kwargs` (optional): any additional keyword arguments + :param kwargs: any additional keyword arguments are the same as the arguments to :meth:`find`. + :: code-block: python + >>> collection.find_one(max_time_ms=100) """ @@ -1529,28 +1518,27 @@ def find(self, *args: Any, **kwargs: Any) -> Cursor[_DocumentType]: The :meth:`find` method obeys the :attr:`read_preference` of this :class:`Collection`. - :Parameters: - - `filter` (optional): A query document that selects which documents + :param filter: A query document that selects which documents to include in the result set. Can be an empty document to include all documents. - - `projection` (optional): a list of field names that should be + :param projection: a list of field names that should be returned in the result set or a dict specifying the fields to include or exclude. If `projection` is a list "_id" will always be returned. Use a dict to exclude fields from the result (e.g. projection={'_id': False}). - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `skip` (optional): the number of documents to omit (from + :param skip: the number of documents to omit (from the start of the result set) when returning the results - - `limit` (optional): the maximum number of results to + :param limit: the maximum number of results to return. A limit of 0 (the default) is equivalent to setting no limit. 
- - `no_cursor_timeout` (optional): if False (the default), any + :param no_cursor_timeout: if False (the default), any returned cursor is closed by the server after 10 minutes of inactivity. If set to True, the returned cursor will never time out on the server. Care should be taken to ensure that cursors with no_cursor_timeout turned on are properly closed. - - `cursor_type` (optional): the type of cursor to return. The valid + :param cursor_type: the type of cursor to return. The valid options are defined by :class:`~pymongo.cursor.CursorType`: - :attr:`~pymongo.cursor.CursorType.NON_TAILABLE` - the result of @@ -1573,53 +1561,53 @@ def find(self, *args: Any, **kwargs: Any) -> Cursor[_DocumentType]: results to the client without waiting for the client to request each batch, reducing latency. See notes on compatibility below. - - `sort` (optional): a list of (key, direction) pairs + :param sort: a list of (key, direction) pairs specifying the sort order for this query. See :meth:`~pymongo.cursor.Cursor.sort` for details. - - `allow_partial_results` (optional): if True, mongos will return + :param allow_partial_results: if True, mongos will return partial results if some shards are down instead of returning an error. - - `oplog_replay` (optional): **DEPRECATED** - if True, set the + :param oplog_replay: **DEPRECATED** - if True, set the oplogReplay query flag. Default: False. - - `batch_size` (optional): Limits the number of documents returned in + :param batch_size: Limits the number of documents returned in a single batch. - - `collation` (optional): An instance of + :param collation: An instance of :class:`~pymongo.collation.Collation`. - - `return_key` (optional): If True, return only the index keys in + :param return_key: If True, return only the index keys in each document. 
- - `show_record_id` (optional): If True, adds a field ``$recordId`` in + :param show_record_id: If True, adds a field ``$recordId`` in each document with the storage engine's internal record identifier. - - `snapshot` (optional): **DEPRECATED** - If True, prevents the + :param snapshot: **DEPRECATED** - If True, prevents the cursor from returning a document more than once because of an intervening write operation. - - `hint` (optional): An index, in the same format as passed to + :param hint: An index, in the same format as passed to :meth:`~pymongo.collection.Collection.create_index` (e.g. ``[('field', ASCENDING)]``). Pass this as an alternative to calling :meth:`~pymongo.cursor.Cursor.hint` on the cursor to tell Mongo the proper index to use for the query. - - `max_time_ms` (optional): Specifies a time limit for a query + :param max_time_ms: Specifies a time limit for a query operation. If the specified time is exceeded, the operation will be aborted and :exc:`~pymongo.errors.ExecutionTimeout` is raised. Pass this as an alternative to calling :meth:`~pymongo.cursor.Cursor.max_time_ms` on the cursor. - - `max_scan` (optional): **DEPRECATED** - The maximum number of + :param max_scan: **DEPRECATED** - The maximum number of documents to scan. Pass this as an alternative to calling :meth:`~pymongo.cursor.Cursor.max_scan` on the cursor. - - `min` (optional): A list of field, limit pairs specifying the + :param min: A list of field, limit pairs specifying the inclusive lower bound for all keys of a specific index in order. Pass this as an alternative to calling :meth:`~pymongo.cursor.Cursor.min` on the cursor. ``hint`` must also be passed to ensure the query utilizes the correct index. - - `max` (optional): A list of field, limit pairs specifying the + :param max: A list of field, limit pairs specifying the exclusive upper bound for all keys of a specific index in order. Pass this as an alternative to calling :meth:`~pymongo.cursor.Cursor.max` on the cursor. 
``hint`` must also be passed to ensure the query utilizes the correct index. - - `comment` (optional): A string to attach to the query to help + :param comment: A string to attach to the query to help interpret and trace the operation in the server logs and in profile data. Pass this as an alternative to calling :meth:`~pymongo.cursor.Cursor.comment` on the cursor. - - `allow_disk_use` (optional): if True, MongoDB may use temporary + :param allow_disk_use: if True, MongoDB may use temporary disk files to store data exceeding the system memory limit while processing a blocking sort operation. The option has no effect if MongoDB can satisfy the specified sort using an index, or if the @@ -1732,7 +1720,7 @@ def _count_cmd( session: Optional[ClientSession], conn: Connection, read_preference: Optional[_ServerMode], - cmd: SON[str, Any], + cmd: dict[str, Any], collation: Optional[Collation], ) -> int: """Internal count command helper.""" @@ -1756,7 +1744,7 @@ def _aggregate_one_result( self, conn: Connection, read_preference: Optional[_ServerMode], - cmd: SON[str, Any], + cmd: dict[str, Any], collation: Optional[_CollationIn], session: Optional[ClientSession], ) -> Optional[Mapping[str, Any]]: @@ -1790,10 +1778,9 @@ def estimated_document_count(self, comment: Optional[Any] = None, **kwargs: Any) - `maxTimeMS` (int): The maximum amount of time to allow this operation to run, in milliseconds. - :Parameters: - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `**kwargs` (optional): See list of options above. + :param kwargs: See list of options above. .. versionchanged:: 4.2 This method now always uses the `count`_ command. 
Due to an oversight in versions @@ -1816,11 +1803,11 @@ def _cmd( conn: Connection, read_preference: Optional[_ServerMode], ) -> int: - cmd: SON[str, Any] = SON([("count", self.__name)]) + cmd: dict[str, Any] = {"count": self.__name} cmd.update(kwargs) return self._count_cmd(session, conn, read_preference, cmd, collation=None) - return self._retryable_non_cursor_read(_cmd, None) + return self._retryable_non_cursor_read(_cmd, None, operation=_Op.COUNT) def count_documents( self, @@ -1867,15 +1854,14 @@ def count_documents( | $nearSphere | `$geoWithin`_ with `$centerSphere`_ | +-------------+-------------------------------------+ - :Parameters: - - `filter` (required): A query document that selects which documents + :param filter: A query document that selects which documents to count in the collection. Can be an empty document to count all documents. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `**kwargs` (optional): See list of options above. + :param kwargs: See list of options above. .. 
versionadded:: 3.7 @@ -1893,7 +1879,7 @@ def count_documents( if comment is not None: kwargs["comment"] = comment pipeline.append({"$group": {"_id": 1, "n": {"$sum": 1}}}) - cmd = SON([("aggregate", self.__name), ("pipeline", pipeline), ("cursor", {})]) + cmd = {"aggregate": self.__name, "pipeline": pipeline, "cursor": {}} if "hint" in kwargs and not isinstance(kwargs["hint"], str): kwargs["hint"] = helpers._index_document(kwargs["hint"]) collation = validate_collation_or_none(kwargs.pop("collation", None)) @@ -1910,17 +1896,18 @@ def _cmd( return 0 return result["n"] - return self._retryable_non_cursor_read(_cmd, session) + return self._retryable_non_cursor_read(_cmd, session, _Op.COUNT) def _retryable_non_cursor_read( self, func: Callable[[Optional[ClientSession], Server, Connection, Optional[_ServerMode]], T], session: Optional[ClientSession], + operation: str, ) -> T: """Non-cursor read helper to handle implicit session creation.""" client = self.__database.client with client._tmp_session(session) as s: - return client._retryable_read(func, self._read_preference_for(s), s) + return client._retryable_read(func, self._read_preference_for(s), s, operation) def create_indexes( self, @@ -1938,14 +1925,13 @@ def create_indexes( >>> db.test.create_indexes([index1, index2]) ["hello_world", "goodbye_-1"] - :Parameters: - - `indexes`: A list of :class:`~pymongo.operations.IndexModel` + :param indexes: A list of :class:`~pymongo.operations.IndexModel` instances. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `**kwargs` (optional): optional arguments to the createIndexes + :param kwargs: optional arguments to the createIndexes command (like maxTimeMS) can be passed as keyword arguments. @@ -1976,16 +1962,15 @@ def __create_indexes( ) -> list[str]: """Internal createIndexes helper. 
- :Parameters: - - `indexes`: A list of :class:`~pymongo.operations.IndexModel` + :param indexes: A list of :class:`~pymongo.operations.IndexModel` instances. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `**kwargs` (optional): optional arguments to the createIndexes + :param kwargs: optional arguments to the createIndexes command (like maxTimeMS) can be passed as keyword arguments. """ names = [] - with self._conn_for_writes(session) as conn: + with self._conn_for_writes(session, operation=_Op.CREATE_INDEXES) as conn: supports_quorum = conn.max_wire_version >= 9 def gen_indexes() -> Iterator[Mapping[str, Any]]: @@ -1998,7 +1983,7 @@ def gen_indexes() -> Iterator[Mapping[str, Any]]: names.append(document["name"]) yield document - cmd = SON([("createIndexes", self.name), ("indexes", list(gen_indexes()))]) + cmd = {"createIndexes": self.name, "indexes": list(gen_indexes())} cmd.update(kwargs) if "commitQuorum" in kwargs and not supports_quorum: raise ConfigurationError( @@ -2028,7 +2013,7 @@ def create_index( Takes either a single key or a list containing (key, direction) pairs or keys. If no direction is given, :data:`~pymongo.ASCENDING` will be assumed. - The key(s) must be an instance of :class:`str`and the direction(s) must + The key(s) must be an instance of :class:`str` and the direction(s) must be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`, :data:`~pymongo.TEXT`). @@ -2092,14 +2077,13 @@ def create_index( .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of this collection is automatically applied to this operation. - :Parameters: - - `keys`: a single key or a list of (key, direction) + :param keys: a single key or a list of (key, direction) pairs specifying the index to create - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. 
- - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `**kwargs` (optional): any additional index creation + :param kwargs: any additional index creation options (see the above list) should be passed as keyword arguments. @@ -2146,12 +2130,11 @@ def drop_indexes( Can be used on non-existent collections or collections with no indexes. Raises OperationFailure on an error. - :Parameters: - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `**kwargs` (optional): optional arguments to the createIndexes + :param kwargs: optional arguments to the createIndexes command (like maxTimeMS) can be passed as keyword arguments. .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of @@ -2193,13 +2176,12 @@ def drop_index( passing the `name` parameter to :meth:`create_index`) the index **must** be dropped by name. - :Parameters: - - `index_or_name`: index (or name of index) to drop - - `session` (optional): a + :param index_or_name: index (or name of index) to drop + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `**kwargs` (optional): optional arguments to the createIndexes + :param kwargs: optional arguments to the createIndexes command (like maxTimeMS) can be passed as keyword arguments. 
@@ -2224,11 +2206,11 @@ def drop_index( if not isinstance(name, str): raise TypeError("index_or_name must be an instance of str or list") - cmd = SON([("dropIndexes", self.__name), ("index", name)]) + cmd = {"dropIndexes": self.__name, "index": name} cmd.update(kwargs) if comment is not None: cmd["comment"] = comment - with self._conn_for_writes(session) as conn: + with self._conn_for_writes(session, operation=_Op.DROP_INDEXES) as conn: self._command( conn, cmd, @@ -2250,14 +2232,12 @@ def list_indexes( ... SON([('v', 2), ('key', SON([('_id', 1)])), ('name', '_id_')]) - :Parameters: - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - :Returns: - An instance of :class:`~pymongo.command_cursor.CommandCursor`. + :return: An instance of :class:`~pymongo.command_cursor.CommandCursor`. .. versionchanged:: 4.1 Added ``comment`` parameter. @@ -2281,7 +2261,7 @@ def _cmd( conn: Connection, read_preference: _ServerMode, ) -> CommandCursor[MutableMapping[str, Any]]: - cmd = SON([("listIndexes", self.__name), ("cursor", {})]) + cmd = {"listIndexes": self.__name, "cursor": {}} if comment is not None: cmd["comment"] = comment @@ -2307,7 +2287,9 @@ def _cmd( return cmd_cursor with self.__database.client._tmp_session(session, False) as s: - return self.__database.client._retryable_read(_cmd, read_pref, s) + return self.__database.client._retryable_read( + _cmd, read_pref, s, operation=_Op.LIST_INDEXES + ) def index_information( self, @@ -2332,10 +2314,9 @@ def index_information( {'_id_': {'key': [('_id', 1)]}, 'x_1': {'unique': True, 'key': [('x', 1)]}} - :Parameters: - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. .. 
versionchanged:: 4.1 @@ -2361,17 +2342,15 @@ def list_search_indexes( ) -> CommandCursor[Mapping[str, Any]]: """Return a cursor over search indexes for the current collection. - :Parameters: - - `name` (optional): If given, the name of the index to search + :param name: If given, the name of the index to search for. Only indexes with matching index names will be returned. If not given, all search indexes for the current collection will be returned. - - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param session: a :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this command. - :Returns: - A :class:`~pymongo.command_cursor.CommandCursor` over the result + :return: A :class:`~pymongo.command_cursor.CommandCursor` over the result set. .. note:: requires a MongoDB server version 7.0+ Atlas cluster. @@ -2384,7 +2363,10 @@ def list_search_indexes( pipeline = [{"$listSearchIndexes": {"name": name}}] coll = self.with_options( - codec_options=DEFAULT_CODEC_OPTIONS, read_preference=ReadPreference.PRIMARY + codec_options=DEFAULT_CODEC_OPTIONS, + read_preference=ReadPreference.PRIMARY, + write_concern=DEFAULT_WRITE_CONCERN, + read_concern=DEFAULT_READ_CONCERN, ) cmd = _CollectionAggregationCommand( coll, @@ -2392,6 +2374,7 @@ def list_search_indexes( pipeline, kwargs, explicit_session=session is not None, + comment=comment, user_fields={"cursor": {"firstBatch": 1}}, ) @@ -2400,6 +2383,7 @@ def list_search_indexes( cmd.get_read_preference(session), # type: ignore[arg-type] session, retryable=not cmd._performs_write, + operation=_Op.LIST_SEARCH_INDEX, ) def create_search_index( @@ -2411,27 +2395,25 @@ def create_search_index( ) -> str: """Create a single search index for the current collection. - :Parameters: - - `model`: The model for the new search index. + :param model: The model for the new search index. 
It can be given as a :class:`~pymongo.operations.SearchIndexModel` instance or a dictionary with a model "definition" and optional "name". - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `**kwargs` (optional): optional arguments to the createSearchIndexes + :param kwargs: optional arguments to the createSearchIndexes command (like maxTimeMS) can be passed as keyword arguments. - :Returns: - The name of the new search index. + :return: The name of the new search index. .. note:: requires a MongoDB server version 7.0+ Atlas cluster. .. versionadded:: 4.5 """ if not isinstance(model, SearchIndexModel): - model = SearchIndexModel(model["definition"], model.get("name")) + model = SearchIndexModel(**model) return self.create_search_indexes([model], session, comment, **kwargs)[0] def create_search_indexes( @@ -2443,16 +2425,14 @@ def create_search_indexes( ) -> list[str]: """Create multiple search indexes for the current collection. - :Parameters: - - `models`: A list of :class:`~pymongo.operations.SearchIndexModel` instances. - - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param models: A list of :class:`~pymongo.operations.SearchIndexModel` instances. + :param session: a :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this command. - - `**kwargs` (optional): optional arguments to the createSearchIndexes + :param kwargs: optional arguments to the createSearchIndexes command (like maxTimeMS) can be passed as keyword arguments. - :Returns: - A list of the newly created search index names. + :return: A list of the newly created search index names. .. note:: requires a MongoDB server version 7.0+ Atlas cluster. 
@@ -2469,10 +2449,10 @@ def gen_indexes() -> Iterator[Mapping[str, Any]]: ) yield index.document - cmd = SON([("createSearchIndexes", self.name), ("indexes", list(gen_indexes()))]) + cmd = {"createSearchIndexes": self.name, "indexes": list(gen_indexes())} cmd.update(kwargs) - with self._conn_for_writes(session) as conn: + with self._conn_for_writes(session, operation=_Op.CREATE_SEARCH_INDEXES) as conn: resp = self._command( conn, cmd, @@ -2490,24 +2470,23 @@ def drop_search_index( ) -> None: """Delete a search index by index name. - :Parameters: - - `name`: The name of the search index to be deleted. - - `session` (optional): a + :param name: The name of the search index to be deleted. + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `**kwargs` (optional): optional arguments to the dropSearchIndexes + :param kwargs: optional arguments to the dropSearchIndexes command (like maxTimeMS) can be passed as keyword arguments. .. note:: requires a MongoDB server version 7.0+ Atlas cluster. .. versionadded:: 4.5 """ - cmd = SON([("dropSearchIndex", self.__name), ("name", name)]) + cmd = {"dropSearchIndex": self.__name, "name": name} cmd.update(kwargs) if comment is not None: cmd["comment"] = comment - with self._conn_for_writes(session) as conn: + with self._conn_for_writes(session, operation=_Op.DROP_SEARCH_INDEXES) as conn: self._command( conn, cmd, @@ -2526,25 +2505,24 @@ def update_search_index( ) -> None: """Update a search index by replacing the existing index definition with the provided definition. - :Parameters: - - `name`: The name of the search index to be updated. - - `definition`: The new search index definition. - - `session` (optional): a + :param name: The name of the search index to be updated. + :param definition: The new search index definition. 
+ :param session: a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `**kwargs` (optional): optional arguments to the updateSearchIndexes + :param kwargs: optional arguments to the updateSearchIndexes command (like maxTimeMS) can be passed as keyword arguments. .. note:: requires a MongoDB server version 7.0+ Atlas cluster. .. versionadded:: 4.5 """ - cmd = SON([("updateSearchIndex", self.__name), ("name", name), ("definition", definition)]) + cmd = {"updateSearchIndex": self.__name, "name": name, "definition": definition} cmd.update(kwargs) if comment is not None: cmd["comment"] = comment - with self._conn_for_writes(session) as conn: + with self._conn_for_writes(session, operation=_Op.UPDATE_SEARCH_INDEX) as conn: self._command( conn, cmd, @@ -2565,10 +2543,9 @@ def options( information on the possible options. Returns an empty dictionary if the collection has not been created yet. - :Parameters: - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. .. versionchanged:: 3.6 @@ -2629,6 +2606,7 @@ def _aggregate( cmd.get_read_preference(session), # type: ignore[arg-type] session, retryable=not cmd._performs_write, + operation=_Op.AGGREGATE, ) def aggregate( @@ -2655,11 +2633,17 @@ def aggregate( .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of this collection is automatically applied to this operation. - :Parameters: - - `pipeline`: a list of aggregation pipeline stages - - `session` (optional): a + :param pipeline: a list of aggregation pipeline stages + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `**kwargs` (optional): extra `aggregate command`_ parameters. 
+ :param let: A dict of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. ``"$$var"``). This option is + only supported on MongoDB >= 5.0. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: extra `aggregate command`_ parameters. All optional `aggregate command`_ parameters should be passed as keyword arguments to this method. Valid options include, but are not @@ -2675,17 +2659,9 @@ def aggregate( returning aggregate results using a cursor. - `collation` (optional): An instance of :class:`~pymongo.collation.Collation`. - - `let` (dict): A dict of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. ``"$$var"``). This option is - only supported on MongoDB >= 5.0. - - `comment` (optional): A user-provided comment to attach to this - command. - :Returns: - A :class:`~pymongo.command_cursor.CommandCursor` over the result + :return: A :class:`~pymongo.command_cursor.CommandCursor` over the result set. .. versionchanged:: 4.1 @@ -2836,47 +2812,45 @@ def watch( ``ReadConcern("majority")`` in order to use the ``$changeStream`` stage. - :Parameters: - - `pipeline` (optional): A list of aggregation pipeline stages to + :param pipeline: A list of aggregation pipeline stages to append to an initial ``$changeStream`` stage. Not all pipeline stages are valid after a ``$changeStream`` stage, see the MongoDB documentation on change streams for the supported stages. - - `full_document` (optional): The fullDocument to pass as an option + :param full_document: The fullDocument to pass as an option to the ``$changeStream`` stage. Allowed values: 'updateLookup', 'whenAvailable', 'required'. 
When set to 'updateLookup', the change notification for partial updates will include both a delta describing the changes to the document, as well as a copy of the entire document that was changed from some time after the change occurred. - - `full_document_before_change`: Allowed values: 'whenAvailable' + :param full_document_before_change: Allowed values: 'whenAvailable' and 'required'. Change events may now result in a 'fullDocumentBeforeChange' response field. - - `resume_after` (optional): A resume token. If provided, the + :param resume_after: A resume token. If provided, the change stream will start returning changes that occur directly after the operation specified in the resume token. A resume token is the _id value of a change document. - - `max_await_time_ms` (optional): The maximum time in milliseconds + :param max_await_time_ms: The maximum time in milliseconds for the server to wait for changes before responding to a getMore operation. - - `batch_size` (optional): The maximum number of documents to return + :param batch_size: The maximum number of documents to return per batch. - - `collation` (optional): The :class:`~pymongo.collation.Collation` + :param collation: The :class:`~pymongo.collation.Collation` to use for the aggregation. - - `start_at_operation_time` (optional): If provided, the resulting + :param start_at_operation_time: If provided, the resulting change stream will only return changes that occurred at or after the specified :class:`~bson.timestamp.Timestamp`. Requires MongoDB >= 4.0. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `start_after` (optional): The same as `resume_after` except that + :param start_after: The same as `resume_after` except that `start_after` can resume notifications after an invalidate event. This option and `resume_after` are mutually exclusive. 
- - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `show_expanded_events` (optional): Include expanded events such as DDL events like `dropIndexes`. + :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. - :Returns: - A :class:`~pymongo.change_stream.CollectionChangeStream` cursor. + :return: A :class:`~pymongo.change_stream.CollectionChangeStream` cursor. .. versionchanged:: 4.3 Added `show_expanded_events` parameter. @@ -2898,7 +2872,7 @@ def watch( .. seealso:: The MongoDB documentation on `changeStreams `_. .. _change streams specification: - https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst + https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md """ return CollectionChangeStream( self, @@ -2932,13 +2906,12 @@ def rename( Raises :class:`~pymongo.errors.InvalidName` if `new_name` is not a valid collection name. - :Parameters: - - `new_name`: new name for this collection - - `session` (optional): a + :param new_name: new name for this collection + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `**kwargs` (optional): additional arguments to the rename command + :param kwargs: additional arguments to the rename command may be passed as keyword arguments to this helper method (i.e. 
``dropTarget=True``) @@ -2964,13 +2937,13 @@ def rename( raise InvalidName("collection names must not contain '$'") new_name = f"{self.__database.name}.{new_name}" - cmd = SON([("renameCollection", self.__full_name), ("to", new_name)]) + cmd = {"renameCollection": self.__full_name, "to": new_name} cmd.update(kwargs) if comment is not None: cmd["comment"] = comment write_concern = self._write_concern_for_cmd(cmd, session) - with self._conn_for_writes(session) as conn: + with self._conn_for_writes(session, operation=_Op.RENAME) as conn: with self.__database.client._tmp_session(session) as s: return conn.command( "admin", @@ -3006,16 +2979,15 @@ def distinct( The :meth:`distinct` method obeys the :attr:`read_preference` of this :class:`Collection`. - :Parameters: - - `key`: name of the field for which we want to get the distinct + :param key: name of the field for which we want to get the distinct values - - `filter` (optional): A query document that specifies the documents + :param filter: A query document that specifies the documents from which to retrieve the distinct values. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `**kwargs` (optional): See list of options above. + :param kwargs: See list of options above. .. versionchanged:: 3.6 Added ``session`` parameter. 
@@ -3026,7 +2998,7 @@ def distinct( """ if not isinstance(key, str): raise TypeError("key must be an instance of str") - cmd = SON([("distinct", self.__name), ("key", key)]) + cmd = {"distinct": self.__name, "key": key} if filter is not None: if "query" in kwargs: raise ConfigurationError("can't pass both filter and query") @@ -3052,7 +3024,7 @@ def _cmd( user_fields={"values": 1}, )["values"] - return self._retryable_non_cursor_read(_cmd, session) + return self._retryable_non_cursor_read(_cmd, session, operation=_Op.DISTINCT) def _write_concern_for_cmd( self, cmd: Mapping[str, Any], session: Optional[ClientSession] @@ -3083,7 +3055,7 @@ def __find_and_modify( "return_document must be ReturnDocument.BEFORE or ReturnDocument.AFTER" ) collation = validate_collation_or_none(kwargs.pop("collation", None)) - cmd = SON([("findAndModify", self.__name), ("query", filter), ("new", return_document)]) + cmd = {"findAndModify": self.__name, "query": filter, "new": return_document} if let is not None: common.validate_is_mapping("let", let) cmd["let"] = let @@ -3093,7 +3065,7 @@ def __find_and_modify( if sort is not None: cmd["sort"] = helpers._index_document(sort) if upsert is not None: - common.validate_boolean("upsert", upsert) + validate_boolean("upsert", upsert) cmd["upsert"] = upsert if hint is not None: if not isinstance(hint, str): @@ -3136,7 +3108,10 @@ def _find_and_modify( return out.get("value") return self.__database.client._retryable_write( - write_concern.acknowledged, _find_and_modify, session + write_concern.acknowledged, + _find_and_modify, + session, + operation=_Op.FIND_AND_MODIFY, ) def find_one_and_delete( @@ -3176,30 +3151,29 @@ def find_one_and_delete( >>> db.test.find_one_and_delete({'x': 1}, projection={'_id': False}) {'x': 1} - :Parameters: - - `filter`: A query that matches the document to delete. - - `projection` (optional): a list of field names that should be + :param filter: A query that matches the document to delete. 
+ :param projection: a list of field names that should be returned in the result document or a mapping specifying the fields to include or exclude. If `projection` is a list "_id" will always be returned. Use a mapping to exclude fields from the result (e.g. projection={'_id': False}). - - `sort` (optional): a list of (key, direction) pairs + :param sort: a list of (key, direction) pairs specifying the sort order for the query. If multiple documents match the query, they are sorted and the first is deleted. - - `hint` (optional): An index to use to support the query predicate + :param hint: An index to use to support the query predicate specified either by its string name, or in the same format as passed to :meth:`~pymongo.collection.Collection.create_index` (e.g. ``[('field', ASCENDING)]``). This option is only supported on MongoDB 4.4 and above. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `let` (optional): Map of parameter names and values. Values must be + :param let: Map of parameter names and values. Values must be constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `**kwargs` (optional): additional command arguments can be passed + :param kwargs: additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). @@ -3265,40 +3239,39 @@ def find_one_and_replace( {'x': 1, '_id': 1} {'x': 1, '_id': 2} - :Parameters: - - `filter`: A query that matches the document to replace. - - `replacement`: The replacement document. - - `projection` (optional): A list of field names that should be + :param filter: A query that matches the document to replace. + :param replacement: The replacement document. 
+ :param projection: A list of field names that should be returned in the result document or a mapping specifying the fields to include or exclude. If `projection` is a list "_id" will always be returned. Use a mapping to exclude fields from the result (e.g. projection={'_id': False}). - - `sort` (optional): a list of (key, direction) pairs + :param sort: a list of (key, direction) pairs specifying the sort order for the query. If multiple documents match the query, they are sorted and the first is replaced. - - `upsert` (optional): When ``True``, inserts a new document if no + :param upsert: When ``True``, inserts a new document if no document matches the query. Defaults to ``False``. - - `return_document`: If + :param return_document: If :attr:`ReturnDocument.BEFORE` (the default), returns the original document before it was replaced, or ``None`` if no document matches. If :attr:`ReturnDocument.AFTER`, returns the replaced or inserted document. - - `hint` (optional): An index to use to support the query + :param hint: An index to use to support the query predicate specified either by its string name, or in the same format as passed to :meth:`~pymongo.collection.Collection.create_index` (e.g. ``[('field', ASCENDING)]``). This option is only supported on MongoDB 4.4 and above. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `let` (optional): Map of parameter names and values. Values must be + :param let: Map of parameter names and values. Values must be constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. 
- - `**kwargs` (optional): additional command arguments can be passed + :param kwargs: additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). @@ -3412,41 +3385,40 @@ def find_one_and_update( ... sort=[('_id', pymongo.DESCENDING)]) {'_id': 701, 'done': True, 'result': {'count': 17}} - :Parameters: - - `filter`: A query that matches the document to update. - - `update`: The update operations to apply. - - `projection` (optional): A list of field names that should be + :param filter: A query that matches the document to update. + :param update: The update operations to apply. + :param projection: A list of field names that should be returned in the result document or a mapping specifying the fields to include or exclude. If `projection` is a list "_id" will always be returned. Use a dict to exclude fields from the result (e.g. projection={'_id': False}). - - `sort` (optional): a list of (key, direction) pairs + :param sort: a list of (key, direction) pairs specifying the sort order for the query. If multiple documents match the query, they are sorted and the first is updated. - - `upsert` (optional): When ``True``, inserts a new document if no + :param upsert: When ``True``, inserts a new document if no document matches the query. Defaults to ``False``. - - `return_document`: If + :param return_document: If :attr:`ReturnDocument.BEFORE` (the default), returns the original document before it was updated. If :attr:`ReturnDocument.AFTER`, returns the updated or inserted document. - - `array_filters` (optional): A list of filters specifying which + :param array_filters: A list of filters specifying which array elements an update should apply. - - `hint` (optional): An index to use to support the query + :param hint: An index to use to support the query predicate specified either by its string name, or in the same format as passed to :meth:`~pymongo.collection.Collection.create_index` (e.g. 
``[('field', ASCENDING)]``). This option is only supported on MongoDB 4.4 and above. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `let` (optional): Map of parameter names and values. Values must be + :param let: Map of parameter names and values. Values must be constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `**kwargs` (optional): additional command arguments can be passed + :param kwargs: additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index 42becece28..6d48a87824 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -73,7 +73,7 @@ def __init__( self.__killed = self.__id == 0 self.__comment = comment if self.__killed: - self.__end_session(True) + self.__end_session() if "ns" in cursor_info: # noqa: SIM401 self.__ns = cursor_info["ns"] @@ -112,9 +112,9 @@ def __die(self, synchronous: bool = False) -> None: self.__session = None self.__sock_mgr = None - def __end_session(self, synchronous: bool) -> None: + def __end_session(self) -> None: if self.__session and not self.__explicit_session: - self.__session._end_session(lock=synchronous) + self.__session.end_session() self.__session = None def close(self) -> None: @@ -134,8 +134,7 @@ def batch_size(self, batch_size: int) -> CommandCursor[_DocumentType]: Raises :exc:`TypeError` if `batch_size` is not an integer. Raises :exc:`ValueError` if `batch_size` is less than ``0``. - :Parameters: - - `batch_size`: The size of each batch of results requested. + :param batch_size: The size of each batch of results requested. 
""" if not isinstance(batch_size, int): raise TypeError("batch_size must be an integer") @@ -335,8 +334,7 @@ def try_next(self) -> Optional[_DocumentType]: document is returned, otherwise, if the getMore returns no documents (because there is no additional data) then ``None`` is returned. - :Returns: - The next document or ``None`` when no document is available + :return: The next document or ``None`` when no document is available after running a single getMore or when the cursor is closed. .. versionadded:: 4.5 diff --git a/pymongo/common.py b/pymongo/common.py index e3da3a5f69..57560a7b0d 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -17,9 +17,9 @@ from __future__ import annotations import datetime -import inspect import warnings from collections import OrderedDict, abc +from difflib import get_close_matches from typing import ( TYPE_CHECKING, Any, @@ -40,7 +40,6 @@ from bson.binary import UuidRepresentation from bson.codec_options import CodecOptions, DatetimeConversion, TypeRegistry from bson.raw_bson import RawBSONDocument -from pymongo.auth import MECHANISMS from pymongo.compression_support import ( validate_compressors, validate_zlib_compression_level, @@ -162,9 +161,12 @@ def clean_node(node: str) -> tuple[str, int]: return host.lower(), port -def raise_config_error(key: str, dummy: Any) -> NoReturn: +def raise_config_error(key: str, suggestions: Optional[list] = None) -> NoReturn: """Raise ConfigurationError with the given key name.""" - raise ConfigurationError(f"Unknown option {key}") + msg = f"Unknown option: {key}." + if suggestions: + msg += f" Did you mean one of ({', '.join(suggestions)}) or maybe a camelCase version of one? Refer to docstring." + raise ConfigurationError(msg) # Mapping of URI uuid representation options to valid subtypes. 
@@ -376,6 +378,8 @@ def validate_read_preference_mode(dummy: Any, value: Any) -> _ServerMode: def validate_auth_mechanism(option: str, value: Any) -> str: """Validate the authMechanism URI option.""" + from pymongo.auth import MECHANISMS + if value not in MECHANISMS: raise ValueError(f"{option} must be in {tuple(MECHANISMS)}") return value @@ -420,7 +424,8 @@ def validate_read_preference_tags(name: str, value: Any) -> list[dict[str, str]] "CANONICALIZE_HOST_NAME", "SERVICE_REALM", "AWS_SESSION_TOKEN", - "PROVIDER_NAME", + "ENVIRONMENT", + "TOKEN_RESOURCE", ] ) @@ -436,50 +441,42 @@ def validate_auth_mechanism_properties(option: str, value: Any) -> dict[str, Uni props[key] = value elif isinstance(value, bool): props[key] = str(value).lower() - elif key in ["allowed_hosts"] and isinstance(value, list): + elif key in ["ALLOWED_HOSTS"] and isinstance(value, list): props[key] = value - elif inspect.isfunction(value): - signature = inspect.signature(value) - if key == "request_token_callback": - expected_params = 2 - else: - raise ValueError(f"Unrecognized Auth mechanism function {key}") - if len(signature.parameters) != expected_params: - msg = f"{key} must accept {expected_params} parameters" - raise ValueError(msg) + elif key in ["OIDC_CALLBACK", "OIDC_HUMAN_CALLBACK"]: + from pymongo.auth_oidc import OIDCCallback + + if not isinstance(value, OIDCCallback): + raise ValueError("callback must be an OIDCCallback object") props[key] = value else: - raise ValueError( - "Auth mechanism property values must be strings or callback functions" - ) + raise ValueError(f"Invalid type for auth mechanism property {key}, {type(value)}") return props value = validate_string(option, value) + value = unquote_plus(value) for opt in value.split(","): - try: - key, val = opt.split(":") - except ValueError: + key, _, val = opt.partition(":") + if not val: + raise ValueError("Malformed auth mechanism properties") + if key not in _MECHANISM_PROPS: # Try not to leak the token. 
- if "AWS_SESSION_TOKEN" in opt: - opt = ( # noqa: PLW2901 - "AWS_SESSION_TOKEN:, did you forget " - "to percent-escape the token with quote_plus?" + if "AWS_SESSION_TOKEN" in key: + raise ValueError( + "auth mechanism properties must be " + "key:value pairs like AWS_SESSION_TOKEN:" ) - raise ValueError( - "auth mechanism properties must be " - "key:value pairs like SERVICE_NAME:" - f"mongodb, not {opt}." - ) from None - if key not in _MECHANISM_PROPS: + raise ValueError( f"{key} is not a supported auth " "mechanism property. Must be one of " f"{tuple(_MECHANISM_PROPS)}." ) + if key == "CANONICALIZE_HOST_NAME": props[key] = validate_boolean_or_string(key, val) else: - props[key] = unquote_plus(val) + props[key] = val return props @@ -810,14 +807,24 @@ def validate_auth_option(option: str, value: Any) -> tuple[str, Any]: """Validate optional authentication parameters.""" lower, value = validate(option, value) if lower not in _AUTH_OPTIONS: - raise ConfigurationError(f"Unknown authentication option: {option}") + raise ConfigurationError(f"Unknown option: {option}. Must be in {_AUTH_OPTIONS}") return option, value +def _get_validator( + key: str, validators: dict[str, Callable[[Any, Any], Any]], normed_key: Optional[str] = None +) -> Callable: + normed_key = normed_key or key + try: + return validators[normed_key] + except KeyError: + suggestions = get_close_matches(normed_key, validators, cutoff=0.2) + raise_config_error(key, suggestions) + + def validate(option: str, value: Any) -> tuple[str, Any]: """Generic validation function.""" - lower = option.lower() - validator = VALIDATORS.get(lower, raise_config_error) + validator = _get_validator(option, VALIDATORS, normed_key=option.lower()) value = validator(option, value) return option, value @@ -828,9 +835,8 @@ def get_validated_options( """Validate each entry in options and raise a warning if it is not valid. Returns a copy of options with invalid entries removed. 
- :Parameters: - - `opts`: A dict containing MongoDB URI options. - - `warn` (optional): If ``True`` then warnings will be logged and + :param opts: A dict containing MongoDB URI options. + :param warn: If ``True`` then warnings will be logged and invalid options will be ignored. Otherwise, invalid options will cause errors. """ @@ -856,15 +862,15 @@ def get_setter_key(x: str) -> str: for opt, value in options.items(): normed_key = get_normed_key(opt) try: - validator = URI_OPTIONS_VALIDATOR_MAP.get(normed_key, raise_config_error) - value = validator(opt, value) # noqa: PLW2901 + validator = _get_validator(opt, URI_OPTIONS_VALIDATOR_MAP, normed_key=normed_key) + validated = validator(opt, value) except (ValueError, TypeError, ConfigurationError) as exc: if warn: warnings.warn(str(exc), stacklevel=2) else: raise else: - validated_options[get_setter_key(normed_key)] = value + validated_options[get_setter_key(normed_key)] = validated return validated_options diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index ad54d628bf..2f155352d2 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -16,35 +16,39 @@ import warnings from typing import Any, Iterable, Optional, Union -try: - import snappy +from pymongo.hello import HelloCompat +from pymongo.helpers import _SENSITIVE_COMMANDS - _HAVE_SNAPPY = True -except ImportError: - # python-snappy isn't available. - _HAVE_SNAPPY = False +_SUPPORTED_COMPRESSORS = {"snappy", "zlib", "zstd"} +_NO_COMPRESSION = {HelloCompat.CMD, HelloCompat.LEGACY_CMD} +_NO_COMPRESSION.update(_SENSITIVE_COMMANDS) -try: - import zlib - _HAVE_ZLIB = True -except ImportError: - # Python built without zlib support. 
- _HAVE_ZLIB = False +def _have_snappy() -> bool: + try: + import snappy # type:ignore[import] # noqa: F401 -try: - from zstandard import ZstdCompressor, ZstdDecompressor + return True + except ImportError: + return False - _HAVE_ZSTD = True -except ImportError: - _HAVE_ZSTD = False -from pymongo.hello import HelloCompat -from pymongo.monitoring import _SENSITIVE_COMMANDS +def _have_zlib() -> bool: + try: + import zlib # noqa: F401 -_SUPPORTED_COMPRESSORS = {"snappy", "zlib", "zstd"} -_NO_COMPRESSION = {HelloCompat.CMD, HelloCompat.LEGACY_CMD} -_NO_COMPRESSION.update(_SENSITIVE_COMMANDS) + return True + except ImportError: + return False + + +def _have_zstd() -> bool: + try: + import zstandard # noqa: F401 + + return True + except ImportError: + return False def validate_compressors(dummy: Any, value: Union[str, Iterable[str]]) -> list[str]: @@ -59,21 +63,21 @@ def validate_compressors(dummy: Any, value: Union[str, Iterable[str]]) -> list[s if compressor not in _SUPPORTED_COMPRESSORS: compressors.remove(compressor) warnings.warn(f"Unsupported compressor: {compressor}", stacklevel=2) - elif compressor == "snappy" and not _HAVE_SNAPPY: + elif compressor == "snappy" and not _have_snappy(): compressors.remove(compressor) warnings.warn( "Wire protocol compression with snappy is not available. " "You must install the python-snappy module for snappy support.", stacklevel=2, ) - elif compressor == "zlib" and not _HAVE_ZLIB: + elif compressor == "zlib" and not _have_zlib(): compressors.remove(compressor) warnings.warn( "Wire protocol compression with zlib is not available. " "The zlib module is not available.", stacklevel=2, ) - elif compressor == "zstd" and not _HAVE_ZSTD: + elif compressor == "zstd" and not _have_zstd(): compressors.remove(compressor) warnings.warn( "Wire protocol compression with zstandard is not available. 
" @@ -118,6 +122,8 @@ class SnappyContext: @staticmethod def compress(data: bytes) -> bytes: + import snappy + return snappy.compress(data) @@ -128,6 +134,8 @@ def __init__(self, level: int): self.level = level def compress(self, data: bytes) -> bytes: + import zlib + return zlib.compress(data, self.level) @@ -138,7 +146,9 @@ class ZstdContext: def compress(data: bytes) -> bytes: # ZstdCompressor is not thread safe. # TODO: Use a pool? - return ZstdCompressor().compress(data) + import zstandard + + return zstandard.ZstdCompressor().compress(data) def decompress(data: bytes, compressor_id: int) -> bytes: @@ -147,12 +157,18 @@ def decompress(data: bytes, compressor_id: int) -> bytes: # https://github.com/andrix/python-snappy/issues/65 # This only matters when data is a memoryview since # id(bytes(data)) == id(data) when data is a bytes. + import snappy + return snappy.uncompress(bytes(data)) elif compressor_id == ZlibContext.compressor_id: + import zlib + return zlib.decompress(data) elif compressor_id == ZstdContext.compressor_id: # ZstdDecompressor is not thread safe. # TODO: Use a pool? 
- return ZstdDecompressor().decompress(data) + import zstandard + + return zstandard.ZstdDecompressor().decompress(data) else: raise ValueError("Unknown compressorId %d" % (compressor_id,)) diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 6dfb3ba90b..3151fcaf3d 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -40,7 +40,6 @@ from pymongo import helpers from pymongo.collation import validate_collation_or_none from pymongo.common import ( - validate_boolean, validate_is_document_type, validate_is_mapping, ) @@ -57,6 +56,7 @@ ) from pymongo.response import PinnedResponse from pymongo.typings import _Address, _CollationIn, _DocumentOut, _DocumentType +from pymongo.write_concern import validate_boolean if TYPE_CHECKING: from _typeshed import SupportsItems @@ -272,14 +272,14 @@ def __init__( self.__comment = comment self.__max_time_ms = max_time_ms self.__max_await_time_ms: Optional[int] = None - self.__max: Optional[Union[SON[Any, Any], _Sort]] = max - self.__min: Optional[Union[SON[Any, Any], _Sort]] = min + self.__max: Optional[Union[dict[Any, Any], _Sort]] = max + self.__min: Optional[Union[dict[Any, Any], _Sort]] = min self.__collation = validate_collation_or_none(collation) self.__return_key = return_key self.__show_record_id = show_record_id self.__allow_disk_use = allow_disk_use self.__snapshot = snapshot - self.__hint: Union[str, SON[str, Any], None] + self.__hint: Union[str, dict[str, Any], None] self.__set_hint(hint) # Exhaust cursor support @@ -473,17 +473,12 @@ def __query_spec(self) -> Mapping[str, Any]: if operators: # Make a shallow copy so we can cleanly rewind or clone. - spec = copy.copy(self.__spec) + spec = dict(self.__spec) # Allow-listed commands must be wrapped in $query. if "$query" not in spec: # $query has to come first - spec = SON([("$query", spec)]) - - if not isinstance(spec, SON): - # Ensure the spec is SON. As order is important this will - # ensure its set before merging in any extra operators. 
- spec = SON(spec) + spec = {"$query": spec} spec.update(operators) return spec @@ -495,7 +490,7 @@ def __query_spec(self) -> Mapping[str, Any]: elif "query" in self.__spec and ( len(self.__spec) == 1 or next(iter(self.__spec)) == "query" ): - return SON({"$query": self.__spec}) + return {"$query": self.__spec} return self.__spec @@ -548,8 +543,7 @@ def allow_disk_use(self, allow_disk_use: bool) -> Cursor[_DocumentType]: .. note:: `allow_disk_use` requires server version **>= 4.4** - :Parameters: - - `allow_disk_use`: if True, MongoDB may use temporary + :param allow_disk_use: if True, MongoDB may use temporary disk files to store data exceeding the system memory limit while processing a blocking sort operation. @@ -570,8 +564,7 @@ def limit(self, limit: int) -> Cursor[_DocumentType]: has already been used. The last `limit` applied to this cursor takes precedence. A limit of ``0`` is equivalent to no limit. - :Parameters: - - `limit`: the number of results to return + :param limit: the number of results to return .. seealso:: The MongoDB documentation on `limit `_. """ @@ -601,8 +594,7 @@ def batch_size(self, batch_size: int) -> Cursor[_DocumentType]: :class:`Cursor` has already been used. The last `batch_size` applied to this cursor takes precedence. - :Parameters: - - `batch_size`: The size of each batch of results requested. + :param batch_size: The size of each batch of results requested. """ if not isinstance(batch_size, int): raise TypeError("batch_size must be an integer") @@ -622,8 +614,7 @@ def skip(self, skip: int) -> Cursor[_DocumentType]: already been used. The last `skip` applied to this cursor takes precedence. 
- :Parameters: - - `skip`: the number of results to skip + :param skip: the number of results to skip """ if not isinstance(skip, int): raise TypeError("skip must be an integer") @@ -644,8 +635,7 @@ def max_time_ms(self, max_time_ms: Optional[int]) -> Cursor[_DocumentType]: Raises :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has already been used. - :Parameters: - - `max_time_ms`: the time limit after which the operation is aborted + :param max_time_ms: the time limit after which the operation is aborted """ if not isinstance(max_time_ms, int) and max_time_ms is not None: raise TypeError("max_time_ms must be an integer or None") @@ -665,8 +655,7 @@ def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> Cursor[_Documen .. note:: `max_await_time_ms` requires server version **>= 3.2** - :Parameters: - - `max_await_time_ms`: the time limit after which the operation is + :param max_await_time_ms: the time limit after which the operation is aborted .. versionadded:: 3.2 @@ -727,8 +716,7 @@ def __getitem__(self, index: Union[int, slice]) -> Union[_DocumentType, Cursor[_ start value, or a stop value less than or equal to the start value. - :Parameters: - - `index`: An integer or slice index to be applied to this cursor + :param index: An integer or slice index to be applied to this cursor """ self.__check_okay_to_chain() self.__empty = False @@ -777,8 +765,7 @@ def max_scan(self, max_scan: Optional[int]) -> Cursor[_DocumentType]: cursor has already been used. Only the last :meth:`max_scan` applied to this cursor has any effect. - :Parameters: - - `max_scan`: the maximum number of documents to scan + :param max_scan: the maximum number of documents to scan .. versionchanged:: 3.7 Deprecated :meth:`max_scan`. Support for this option is deprecated in @@ -796,8 +783,7 @@ def max(self, spec: _Sort) -> Cursor[_DocumentType]: the query uses the expected index and starting in MongoDB 4.2 :meth:`~hint` will be required. 
- :Parameters: - - `spec`: a list of field, limit pairs specifying the exclusive + :param spec: a list of field, limit pairs specifying the exclusive upper bound for all keys of a specific index in order. .. versionchanged:: 3.8 @@ -809,7 +795,7 @@ def max(self, spec: _Sort) -> Cursor[_DocumentType]: raise TypeError("spec must be an instance of list or tuple") self.__check_okay_to_chain() - self.__max = SON(spec) + self.__max = dict(spec) return self def min(self, spec: _Sort) -> Cursor[_DocumentType]: @@ -819,8 +805,7 @@ def min(self, spec: _Sort) -> Cursor[_DocumentType]: the query uses the expected index and starting in MongoDB 4.2 :meth:`~hint` will be required. - :Parameters: - - `spec`: a list of field, limit pairs specifying the inclusive + :param spec: a list of field, limit pairs specifying the inclusive lower bound for all keys of a specific index in order. .. versionchanged:: 3.8 @@ -832,7 +817,7 @@ def min(self, spec: _Sort) -> Cursor[_DocumentType]: raise TypeError("spec must be an instance of list or tuple") self.__check_okay_to_chain() - self.__min = SON(spec) + self.__min = dict(spec) return self def sort( @@ -873,10 +858,9 @@ def sort( already been used. Only the last :meth:`sort` applied to this cursor has any effect. - :Parameters: - - `key_or_list`: a single key or a list of (key, direction) + :param key_or_list: a single key or a list of (key, direction) pairs specifying the keys to sort on - - `direction` (optional): only used if `key_or_list` is a single + :param direction: only used if `key_or_list` is a single key, if not given :data:`~pymongo.ASCENDING` is assumed """ self.__check_okay_to_chain() @@ -896,8 +880,7 @@ def distinct(self, key: str) -> list: :class:`~pymongo.collection.Collection` instance on which :meth:`~pymongo.collection.Collection.find` was called. - :Parameters: - - `key`: name of key for which we want to get the distinct values + :param key: name of key for which we want to get the distinct values .. 
seealso:: :meth:`pymongo.collection.Collection.distinct` """ @@ -961,8 +944,7 @@ def hint(self, index: Optional[_Hint]) -> Cursor[_DocumentType]: cleared. The last hint applied to this cursor takes precedence over all others. - :Parameters: - - `index`: index to hint on (as an index specifier) + :param index: index to hint on (as an index specifier) """ self.__check_okay_to_chain() self.__set_hint(index) @@ -973,8 +955,7 @@ def comment(self, comment: Any) -> Cursor[_DocumentType]: http://mongodb.com/docs/manual/reference/operator/comment/ - :Parameters: - - `comment`: A string to attach to the query to help interpret and + :param comment: A string to attach to the query to help interpret and trace the operation in the server logs and in profile data. .. versionadded:: 2.7 @@ -1005,8 +986,7 @@ def where(self, code: Union[str, Code]) -> Cursor[_DocumentType]: .. note:: MongoDB 4.4 drops support for :class:`~bson.code.Code` with scope variables. Consider using `$expr`_ instead. - :Parameters: - - `code`: JavaScript expression to use as a filter + :param code: JavaScript expression to use as a filter .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ .. _$where: https://mongodb.com/docs/manual/reference/operator/query/where/ @@ -1035,8 +1015,7 @@ def collation(self, collation: Optional[_CollationIn]) -> Cursor[_DocumentType]: already been used. Only the last collation applied to this cursor has any effect. - :Parameters: - - `collation`: An instance of :class:`~pymongo.collation.Collation`. + :param collation: An instance of :class:`~pymongo.collation.Collation`. 
""" self.__check_okay_to_chain() self.__collation = validate_collation_or_none(collation) diff --git a/pymongo/database.py b/pymongo/database.py index 70cdee2dc3..70580694e5 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -33,7 +33,6 @@ from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions from bson.dbref import DBRef -from bson.son import SON from bson.timestamp import Timestamp from pymongo import _csot, common from pymongo.aggregation import _DatabaseAggregationCommand @@ -42,6 +41,7 @@ from pymongo.command_cursor import CommandCursor from pymongo.common import _ecoc_coll_name, _esc_coll_name from pymongo.errors import CollectionInvalid, InvalidName, InvalidOperation +from pymongo.operations import _Op from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline @@ -76,7 +76,7 @@ def __init__( self, client: MongoClient[_DocumentType], name: str, - codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional[ReadConcern] = None, @@ -87,18 +87,17 @@ def __init__( :class:`str`. Raises :class:`~pymongo.errors.InvalidName` if `name` is not a valid database name. - :Parameters: - - `client`: A :class:`~pymongo.mongo_client.MongoClient` instance. - - `name`: The database name. - - `codec_options` (optional): An instance of + :param client: A :class:`~pymongo.mongo_client.MongoClient` instance. + :param name: The database name. + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. If ``None`` (the default) client.codec_options is used. - - `read_preference` (optional): The read preference to use. If + :param read_preference: The read preference to use. If ``None`` (the default) client.read_preference is used. 
- - `write_concern` (optional): An instance of + :param write_concern: An instance of :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the default) client.write_concern is used. - - `read_concern` (optional): An instance of + :param read_concern: An instance of :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the default) client.read_concern is used. @@ -155,7 +154,7 @@ def name(self) -> str: def with_options( self, - codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional[ReadConcern] = None, @@ -171,20 +170,19 @@ def with_options( >>> db2.read_preference Secondary(tag_sets=[{'node': 'analytics'}], max_staleness=-1, hedge=None) - :Parameters: - - `codec_options` (optional): An instance of + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. If ``None`` (the default) the :attr:`codec_options` of this :class:`Collection` is used. - - `read_preference` (optional): The read preference to use. If + :param read_preference: The read preference to use. If ``None`` (the default) the :attr:`read_preference` of this :class:`Collection` is used. See :mod:`~pymongo.read_preferences` for options. - - `write_concern` (optional): An instance of + :param write_concern: An instance of :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the default) the :attr:`write_concern` of this :class:`Collection` is used. - - `read_concern` (optional): An instance of + :param read_concern: An instance of :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the default) the :attr:`read_concern` of this :class:`Collection` is used. @@ -219,8 +217,7 @@ def __getattr__(self, name: str) -> Collection[_DocumentType]: Raises InvalidName if an invalid collection name is used. 
- :Parameters: - - `name`: the name of the collection to get + :param name: the name of the collection to get """ if name.startswith("_"): raise AttributeError( @@ -234,15 +231,14 @@ def __getitem__(self, name: str) -> Collection[_DocumentType]: Raises InvalidName if an invalid collection name is used. - :Parameters: - - `name`: the name of the collection to get + :param name: the name of the collection to get """ return Collection(self, name) def get_collection( self, name: str, - codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional[ReadConcern] = None, @@ -265,21 +261,20 @@ def get_collection( >>> coll2.read_preference Secondary(tag_sets=None) - :Parameters: - - `name`: The name of the collection - a string. - - `codec_options` (optional): An instance of + :param name: The name of the collection - a string. + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. If ``None`` (the default) the :attr:`codec_options` of this :class:`Database` is used. - - `read_preference` (optional): The read preference to use. If + :param read_preference: The read preference to use. If ``None`` (the default) the :attr:`read_preference` of this :class:`Database` is used. See :mod:`~pymongo.read_preferences` for options. - - `write_concern` (optional): An instance of + :param write_concern: An instance of :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the default) the :attr:`write_concern` of this :class:`Database` is used. - - `read_concern` (optional): An instance of + :param read_concern: An instance of :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the default) the :attr:`read_concern` of this :class:`Database` is used. 
@@ -325,7 +320,7 @@ def _get_encrypted_fields( def create_collection( self, name: str, - codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional[ReadConcern] = None, @@ -341,30 +336,29 @@ creation. :class:`~pymongo.errors.CollectionInvalid` will be raised if the collection already exists. - :Parameters: - - `name`: the name of the collection to create - - `codec_options` (optional): An instance of + :param name: the name of the collection to create + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. If ``None`` (the default) the :attr:`codec_options` of this :class:`Database` is used. - - `read_preference` (optional): The read preference to use. If + :param read_preference: The read preference to use. If ``None`` (the default) the :attr:`read_preference` of this :class:`Database` is used. - - `write_concern` (optional): An instance of + :param write_concern: An instance of :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the default) the :attr:`write_concern` of this :class:`Database` is used. - - `read_concern` (optional): An instance of + :param read_concern: An instance of :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the default) the :attr:`read_concern` of this :class:`Database` is used. - - `collation` (optional): An instance of + :param collation: An instance of :class:`~pymongo.collation.Collation`. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - ``check_exists`` (optional): if True (the default), send a listCollections command to + :param check_exists: if True (the default), send a listCollections command to check if the collection already exists before creation. 
- - `**kwargs` (optional): additional keyword arguments will + :param kwargs: additional keyword arguments will be passed as options for the `create collection command`_ All optional `create collection command`_ parameters should be passed @@ -503,11 +497,10 @@ def aggregate( .. note:: The :attr:`~pymongo.database.Database.write_concern` of this collection is automatically applied to this operation. - :Parameters: - - `pipeline`: a list of aggregation pipeline stages - - `session` (optional): a + :param pipeline: a list of aggregation pipeline stages + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `**kwargs` (optional): extra `aggregate command`_ parameters. + :param kwargs: extra `aggregate command`_ parameters. All optional `aggregate command`_ parameters should be passed as keyword arguments to this method. Valid options include, but are not @@ -529,8 +522,7 @@ def aggregate( aggregate expression context (e.g. ``"$$var"``). This option is only supported on MongoDB >= 5.0. - :Returns: - A :class:`~pymongo.command_cursor.CommandCursor` over the result + :return: A :class:`~pymongo.command_cursor.CommandCursor` over the result set. .. versionadded:: 3.9 @@ -551,7 +543,11 @@ def aggregate( user_fields={"cursor": {"firstBatch": 1}}, ) return self.client._retryable_read( - cmd.get_cursor, cmd.get_read_preference(s), s, retryable=not cmd._performs_write # type: ignore[arg-type] + cmd.get_cursor, + cmd.get_read_preference(s), # type: ignore[arg-type] + s, + retryable=not cmd._performs_write, + operation=_Op.AGGREGATE, ) def watch( @@ -607,47 +603,45 @@ def watch( For a precise description of the resume process see the `change streams specification`_. - :Parameters: - - `pipeline` (optional): A list of aggregation pipeline stages to + :param pipeline: A list of aggregation pipeline stages to append to an initial ``$changeStream`` stage. 
Not all pipeline stages are valid after a ``$changeStream`` stage, see the MongoDB documentation on change streams for the supported stages. - - `full_document` (optional): The fullDocument to pass as an option + :param full_document: The fullDocument to pass as an option to the ``$changeStream`` stage. Allowed values: 'updateLookup', 'whenAvailable', 'required'. When set to 'updateLookup', the change notification for partial updates will include both a delta describing the changes to the document, as well as a copy of the entire document that was changed from some time after the change occurred. - - `full_document_before_change`: Allowed values: 'whenAvailable' + :param full_document_before_change: Allowed values: 'whenAvailable' and 'required'. Change events may now result in a 'fullDocumentBeforeChange' response field. - - `resume_after` (optional): A resume token. If provided, the + :param resume_after: A resume token. If provided, the change stream will start returning changes that occur directly after the operation specified in the resume token. A resume token is the _id value of a change document. - - `max_await_time_ms` (optional): The maximum time in milliseconds + :param max_await_time_ms: The maximum time in milliseconds for the server to wait for changes before responding to a getMore operation. - - `batch_size` (optional): The maximum number of documents to return + :param batch_size: The maximum number of documents to return per batch. - - `collation` (optional): The :class:`~pymongo.collation.Collation` + :param collation: The :class:`~pymongo.collation.Collation` to use for the aggregation. - - `start_at_operation_time` (optional): If provided, the resulting + :param start_at_operation_time: If provided, the resulting change stream will only return changes that occurred at or after the specified :class:`~bson.timestamp.Timestamp`. Requires MongoDB >= 4.0. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. 
- - `start_after` (optional): The same as `resume_after` except that + :param start_after: The same as `resume_after` except that `start_after` can resume notifications after an invalidate event. This option and `resume_after` are mutually exclusive. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `show_expanded_events` (optional): Include expanded events such as DDL events like `dropIndexes`. + :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. - :Returns: - A :class:`~pymongo.change_stream.DatabaseChangeStream` cursor. + :return: A :class:`~pymongo.change_stream.DatabaseChangeStream` cursor. .. versionchanged:: 4.3 Added `show_expanded_events` parameter. @@ -666,7 +660,7 @@ def watch( .. seealso:: The MongoDB documentation on `changeStreams `_. .. _change streams specification: - https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst + https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md """ return DatabaseChangeStream( self, @@ -736,7 +730,7 @@ def _command( ) -> Union[dict[str, Any], _CodecDocumentType]: """Internal command helper.""" if isinstance(command, str): - command = SON([(command, value)]) + command = {command: value} command.update(kwargs) with self.__client._tmp_session(session) as s: @@ -811,46 +805,50 @@ def command( using: >>> db.command("buildinfo") + OR + >>> db.command({"buildinfo": 1}) For a command where the value matters, like ``{count: collection_name}`` we can do: >>> db.command("count", collection_name) + OR + >>> db.command({"count": collection_name}) For commands that take additional arguments we can use - kwargs. So ``{filemd5: object_id, root: file_root}`` becomes: + kwargs. 
So ``{count: collection_name, query: query}`` becomes: - >>> db.command("filemd5", object_id, root=file_root) + >>> db.command("count", collection_name, query=query) + OR + >>> db.command({"count": collection_name, "query": query}) - :Parameters: - - `command`: document representing the command to be issued, + :param command: document representing the command to be issued, or the name of the command (for simple commands only). .. note:: the order of keys in the `command` document is significant (the "verb" must come first), so commands which require multiple keys (e.g. `findandmodify`) - should use an instance of :class:`~bson.son.SON` or - a string and kwargs instead of a Python `dict`. + should be done with this in mind. - - `value` (optional): value to use for the command verb when + :param value: value to use for the command verb when `command` is passed as a string - - `check` (optional): check the response for errors, raising + :param check: check the response for errors, raising :class:`~pymongo.errors.OperationFailure` if there are any - - `allowable_errors`: if `check` is ``True``, error messages + :param allowable_errors: if `check` is ``True``, error messages in this list will be ignored by error-checking - - `read_preference` (optional): The read preference for this + :param read_preference: The read preference for this operation. See :mod:`~pymongo.read_preferences` for options. If the provided `session` is in a transaction, defaults to the read preference configured for the transaction. Otherwise, defaults to :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. - - `codec_options`: A :class:`~bson.codec_options.CodecOptions` + :param codec_options: A :class:`~bson.codec_options.CodecOptions` instance. - - `session` (optional): A + :param session: A :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. 
- - `**kwargs` (optional): additional keyword arguments will + :param kwargs: additional keyword arguments will be added to the command document before it is sent @@ -885,9 +883,14 @@ def command( if comment is not None: kwargs["comment"] = comment + if isinstance(command, str): + command_name = command + else: + command_name = next(iter(command)) + if read_preference is None: read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY - with self.__client._conn_for_reads(read_preference, session) as ( + with self.__client._conn_for_reads(read_preference, session, operation=command_name) as ( connection, read_preference, ): @@ -921,8 +924,7 @@ def cursor_command( Otherwise, behaves identically to issuing a normal MongoDB command. - :Parameters: - - `command`: document representing the command to be issued, + :param command: document representing the command to be issued, or the name of the command (for simple commands only). .. note:: the order of keys in the `command` document is @@ -931,23 +933,23 @@ def cursor_command( should use an instance of :class:`~bson.son.SON` or a string and kwargs instead of a Python `dict`. - - `value` (optional): value to use for the command verb when - `command` is passed as a string - - `read_preference` (optional): The read preference for this - operation. See :mod:`~pymongo.read_preferences` for options. - If the provided `session` is in a transaction, defaults to the - read preference configured for the transaction. - Otherwise, defaults to - :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. - - `codec_options`: A :class:`~bson.codec_options.CodecOptions` - instance. - - `session` (optional): A - :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to future getMores for this - command. - - `max_await_time_ms` (optional): The number of ms to wait for more data on future getMores for this command. 
- - `**kwargs` (optional): additional keyword arguments will - be added to the command document before it is sent + :param value: value to use for the command verb when + `command` is passed as a string + :param read_preference: The read preference for this + operation. See :mod:`~pymongo.read_preferences` for options. + If the provided `session` is in a transaction, defaults to the + read preference configured for the transaction. + Otherwise, defaults to + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + :param codec_options: A :class:`~bson.codec_options.CodecOptions` + instance. + :param session: A + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to future getMores for this + command. + :param max_await_time_ms: The number of ms to wait for more data on future getMores for this command. + :param kwargs: additional keyword arguments will + be added to the command document before it is sent .. note:: :meth:`command` does **not** obey this Database's :attr:`read_preference` or :attr:`codec_options`. You must use the
""" + if isinstance(command, str): + command_name = command + else: + command_name = next(iter(command)) + with self.__client._tmp_session(session, close=False) as tmp_session: opts = codec_options or DEFAULT_CODEC_OPTIONS @@ -971,7 +978,7 @@ def cursor_command( read_preference = ( tmp_session and tmp_session._txn_read_preference() ) or ReadPreference.PRIMARY - with self.__client._conn_for_reads(read_preference, tmp_session) as ( + with self.__client._conn_for_reads(read_preference, tmp_session, command_name) as ( conn, read_preference, ): @@ -1005,6 +1012,7 @@ def cursor_command( def _retryable_read_command( self, command: Union[str, MutableMapping[str, Any]], + operation: str, session: Optional[ClientSession] = None, ) -> dict[str, Any]: """Same as command but used for retryable read commands.""" @@ -1023,7 +1031,7 @@ def _cmd( session=session, ) - return self.__client._retryable_read(_cmd, read_preference, session) + return self.__client._retryable_read(_cmd, read_preference, session, operation) def _list_collections( self, @@ -1037,7 +1045,7 @@ def _list_collections( Collection[MutableMapping[str, Any]], self.get_collection("$cmd", read_preference=read_preference), ) - cmd = SON([("listCollections", 1), ("cursor", {})]) + cmd = {"listCollections": 1, "cursor": {}} cmd.update(kwargs) with self.__client._tmp_session(session, close=False) as tmp_session: cursor = self._command(conn, cmd, read_preference=read_preference, session=tmp_session)[ @@ -1063,22 +1071,20 @@ def list_collections( ) -> CommandCursor[MutableMapping[str, Any]]: """Get a cursor over the collections of this database. - :Parameters: - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `filter` (optional): A query document to filter the list of + :param filter: A query document to filter the list of collections returned from the listCollections command. 
- - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `**kwargs` (optional): Optional parameters of the + :param kwargs: Optional parameters of the `listCollections command `_ can be passed as keyword arguments to this method. The supported options differ by server version. - :Returns: - An instance of :class:`~pymongo.command_cursor.CommandCursor`. + :return: An instance of :class:`~pymongo.command_cursor.CommandCursor`. .. versionadded:: 3.6 """ @@ -1096,7 +1102,9 @@ def _cmd( ) -> CommandCursor[MutableMapping[str, Any]]: return self._list_collections(conn, session, read_preference=read_preference, **kwargs) - return self.__client._retryable_read(_cmd, read_pref, session) + return self.__client._retryable_read( + _cmd, read_pref, session, operation=_Op.LIST_COLLECTIONS + ) def list_collection_names( self, @@ -1112,14 +1120,13 @@ def list_collection_names( filter = {"name": {"$regex": r"^(?!system\\.)"}} db.list_collection_names(filter=filter) - :Parameters: - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `filter` (optional): A query document to filter the list of + :param filter: A query document to filter the list of collections returned from the listCollections command. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `**kwargs` (optional): Optional parameters of the + :param kwargs: Optional parameters of the `listCollections command `_ can be passed as keyword arguments to this method. 
The supported @@ -1149,11 +1156,11 @@ def list_collection_names( def _drop_helper( self, name: str, session: Optional[ClientSession] = None, comment: Optional[Any] = None ) -> dict[str, Any]: - command = SON([("drop", name)]) + command = {"drop": name} if comment is not None: command["comment"] = comment - with self.__client._conn_for_writes(session) as connection: + with self.__client._conn_for_writes(session, operation=_Op.DROP) as connection: return self._command( connection, command, @@ -1173,14 +1180,13 @@ def drop_collection( ) -> dict[str, Any]: """Drop a collection. - :Parameters: - - `name_or_collection`: the name of a collection to drop or the + :param name_or_collection: the name of a collection to drop or the collection object itself - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `encrypted_fields`: **(BETA)** Document that describes the encrypted fields for + :param encrypted_fields: **(BETA)** Document that describes the encrypted fields for Queryable Encryption. For example:: { @@ -1258,20 +1264,19 @@ def validate_collection( See also the MongoDB documentation on the `validate command`_. - :Parameters: - - `name_or_collection`: A Collection object or the name of a + :param name_or_collection: A Collection object or the name of a collection to validate. - - `scandata`: Do extra checks beyond checking the overall + :param scandata: Do extra checks beyond checking the overall structure of the collection. - - `full`: Have the server do a more thorough scan of the + :param full: Have the server do a more thorough scan of the collection. Use with `scandata` for a thorough scan of the structure of the collection and the individual documents. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. 
- - `background` (optional): A boolean flag that determines whether + :param background: A boolean flag that determines whether the command runs in the background. Requires MongoDB 4.4+. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. .. versionchanged:: 4.1 @@ -1291,7 +1296,7 @@ def validate_collection( if not isinstance(name, str): raise TypeError("name_or_collection must be an instance of str or Collection") - cmd = SON([("validate", name), ("scandata", scandata), ("full", full)]) + cmd = {"validate": name, "scandata": scandata, "full": full} if comment is not None: cmd["comment"] = comment @@ -1356,13 +1361,12 @@ def dereference( :class:`ValueError` if `dbref` has a database specified that is different from the current database. - :Parameters: - - `dbref`: the reference - - `session` (optional): a + :param dbref: the reference + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `**kwargs` (optional): any additional keyword arguments + :param kwargs: any additional keyword arguments are the same as the arguments to :meth:`~pymongo.collection.Collection.find`. 
diff --git a/pymongo/encryption.py b/pymongo/encryption.py index cdaf2358d2..c7f02766c9 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -18,26 +18,29 @@ import contextlib import enum import socket +import uuid import weakref from copy import deepcopy from typing import ( TYPE_CHECKING, Any, + Dict, Generic, Iterator, Mapping, MutableMapping, Optional, Sequence, + Union, cast, ) try: - from pymongocrypt.auto_encrypter import AutoEncrypter - from pymongocrypt.errors import MongoCryptError - from pymongocrypt.explicit_encrypter import ExplicitEncrypter - from pymongocrypt.mongocrypt import MongoCryptOptions - from pymongocrypt.state_machine import MongoCryptCallback + from pymongocrypt.auto_encrypter import AutoEncrypter # type:ignore[import] + from pymongocrypt.errors import MongoCryptError # type:ignore[import] + from pymongocrypt.explicit_encrypter import ExplicitEncrypter # type:ignore[import] + from pymongocrypt.mongocrypt import MongoCryptOptions # type:ignore[import] + from pymongocrypt.state_machine import MongoCryptCallback # type:ignore[import] _HAVE_PYMONGOCRYPT = True except ImportError: @@ -49,7 +52,6 @@ from bson.codec_options import CodecOptions from bson.errors import BSONError from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson -from bson.son import SON from pymongo import _csot from pymongo.collection import Collection from pymongo.common import CONNECT_TIMEOUT @@ -83,9 +85,8 @@ _KMS_CONNECT_TIMEOUT = CONNECT_TIMEOUT # CDRIVER-3262 redefined this value to CONNECT_TIMEOUT _MONGOCRYPTD_TIMEOUT_MS = 10000 - -_DATA_KEY_OPTS: CodecOptions[SON[str, Any]] = CodecOptions( - document_class=SON[str, Any], uuid_representation=STANDARD +_DATA_KEY_OPTS: CodecOptions[dict[str, Any]] = CodecOptions( + document_class=Dict[str, Any], uuid_representation=STANDARD ) # Use RawBSONDocument codec options to avoid needlessly decoding # documents from the key vault. 
@@ -102,7 +103,7 @@ def _wrap_encryption_errors() -> Iterator[None]: # we should propagate them unchanged. raise except Exception as exc: - raise EncryptionError(exc) from None + raise EncryptionError(exc) from exc class _EncryptionIO(MongoCryptCallback): # type: ignore[misc] @@ -135,11 +136,9 @@ def __init__( def kms_request(self, kms_context: MongoCryptKmsContext) -> None: """Complete a KMS request. - :Parameters: - - `kms_context`: A :class:`MongoCryptKmsContext`. + :param kms_context: A :class:`MongoCryptKmsContext`. - :Returns: - None + :return: None """ endpoint = kms_context.endpoint message = kms_context.message @@ -194,12 +193,10 @@ def collection_info( The returned collection info is passed to libmongocrypt which reads the JSON schema. - :Parameters: - - `database`: The database on which to run listCollections. - - `filter`: The filter to pass to listCollections. + :param database: The database on which to run listCollections. + :param filter: The filter to pass to listCollections. - :Returns: - The first document from the listCollections command response as BSON. + :return: The first document from the listCollections command response as BSON. """ with self.client_ref()[database].list_collections(filter=RawBSONDocument(filter)) as cursor: for doc in cursor: @@ -220,12 +217,10 @@ def spawn(self) -> None: def mark_command(self, database: str, cmd: bytes) -> bytes: """Mark a command for encryption. - :Parameters: - - `database`: The database on which to run this command. - - `cmd`: The BSON command to run. + :param database: The database on which to run this command. + :param cmd: The BSON command to run. - :Returns: - The marked command response from mongocryptd. + :return: The marked command response from mongocryptd. 
""" if not self._spawned and not self.opts._mongocryptd_bypass_spawn: self.spawn() @@ -249,11 +244,9 @@ def mark_command(self, database: str, cmd: bytes) -> bytes: def fetch_keys(self, filter: bytes) -> Iterator[bytes]: """Yields one or more keys from the key vault. - :Parameters: - - `filter`: The filter to pass to find. + :param filter: The filter to pass to find. - :Returns: - A generator which yields the requested keys from the key vault. + :return: A generator which yields the requested keys from the key vault. """ assert self.key_vault_coll is not None with self.key_vault_coll.find(RawBSONDocument(filter)) as cursor: @@ -263,11 +256,9 @@ def fetch_keys(self, filter: bytes) -> Iterator[bytes]: def insert_data_key(self, data_key: bytes) -> Binary: """Insert a data key into the key vault. - :Parameters: - - `data_key`: The data key document to insert. + :param data_key: The data key document to insert. - :Returns: - The _id of the inserted data key document. + :return: The _id of the inserted data key document. """ raw_doc = RawBSONDocument(data_key, _KEY_VAULT_OPTS) data_key_id = raw_doc.get("_id") @@ -283,11 +274,9 @@ def bson_encode(self, doc: MutableMapping[str, Any]) -> bytes: A document can be any mapping type (like :class:`dict`). - :Parameters: - - `doc`: mapping type representing a document + :param doc: mapping type representing a document - :Returns: - The encoded BSON bytes. + :return: The encoded BSON bytes. """ return encode(doc) @@ -336,9 +325,8 @@ class _Encrypter: def __init__(self, client: MongoClient[_DocumentTypeArg], opts: AutoEncryptionOpts): """Create a _Encrypter for a client. - :Parameters: - - `client`: The encrypted MongoClient. - - `opts`: The encrypted client's :class:`AutoEncryptionOpts`. + :param client: The encrypted MongoClient. + :param opts: The encrypted client's :class:`AutoEncryptionOpts`. 
""" if opts._schema_map is None: schema_map = None @@ -382,9 +370,9 @@ def _get_internal_client( opts._mongocryptd_uri, connect=False, serverSelectionTimeoutMS=_MONGOCRYPTD_TIMEOUT_MS ) - io_callbacks = _EncryptionIO( + io_callbacks = _EncryptionIO( # type:ignore[misc] metadata_client, key_vault_coll, mongocryptd_client, opts - ) # type:ignore[misc] + ) self._auto_encrypter = AutoEncrypter( io_callbacks, MongoCryptOptions( @@ -401,16 +389,14 @@ def _get_internal_client( def encrypt( self, database: str, cmd: Mapping[str, Any], codec_options: CodecOptions[_DocumentTypeArg] - ) -> MutableMapping[str, Any]: + ) -> dict[str, Any]: """Encrypt a MongoDB command. - :Parameters: - - `database`: The database for this command. - - `cmd`: A command document. - - `codec_options`: The CodecOptions to use while encoding `cmd`. + :param database: The database for this command. + :param cmd: A command document. + :param codec_options: The CodecOptions to use while encoding `cmd`. - :Returns: - The encrypted command to execute. + :return: The encrypted command to execute. """ self._check_closed() encoded_cmd = _dict_to_bson(cmd, False, codec_options) @@ -422,11 +408,9 @@ def encrypt( def decrypt(self, response: bytes) -> Optional[bytes]: """Decrypt a MongoDB command response. - :Parameters: - - `response`: A MongoDB command response as BSON. + :param response: A MongoDB command response as BSON. - :Returns: - The decrypted command response. + :return: The decrypted command response. """ self._check_closed() with _wrap_encryption_errors(): @@ -513,8 +497,7 @@ def __init__( See :ref:`explicit-client-side-encryption` for an example. - :Parameters: - - `kms_providers`: Map of KMS provider options. The `kms_providers` + :param kms_providers: Map of KMS provider options. The `kms_providers` map values differ by provider: - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. @@ -539,20 +522,23 @@ def __init__( data keys. 
This key should be generated and stored as securely as possible. - - `key_vault_namespace`: The namespace for the key vault collection. + KMS providers may be specified with an optional name suffix + separated by a colon, for example "kmip:name" or "aws:name". + Named KMS providers do not support :ref:`CSFLE on-demand credentials`. + :param key_vault_namespace: The namespace for the key vault collection. The key vault collection contains all data keys used for encryption and decryption. Data keys are stored as documents in this MongoDB collection. Data keys are protected with encryption by a KMS provider. - - `key_vault_client`: A MongoClient connected to a MongoDB cluster + :param key_vault_client: A MongoClient connected to a MongoDB cluster containing the `key_vault_namespace` collection. - - `codec_options`: An instance of + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions` to use when encoding a value for encryption and decoding the decrypted BSON value. This should be the same CodecOptions instance configured on the MongoClient, Database, or Collection used to access application data. - - `kms_tls_options` (optional): A map of KMS provider names to TLS + :param kms_tls_options: A map of KMS provider names to TLS options to use when creating secure connections to KMS providers. Accepts the same TLS options as :class:`pymongo.mongo_client.MongoClient`. For example, to @@ -621,10 +607,11 @@ def create_encrypted_collection( creation. :class:`~pymongo.errors.EncryptionError` will be raised if the collection already exists. - :Parameters: - - `name`: the name of the collection to create - - `encrypted_fields` (dict): Document that describes the encrypted fields for - Queryable Encryption. For example:: + :param name: the name of the collection to create + :param encrypted_fields: Document that describes the encrypted fields for + Queryable Encryption. The "keyId" may be set to ``None`` to auto-generate the data keys. For example: + + .. 
code-block: python { "escCollection": "enxcol_.encryptedCollection.esc", @@ -644,19 +631,17 @@ def create_encrypted_collection( ] } - The "keyId" may be set to ``None`` to auto-generate the data keys. - - `kms_provider` (optional): the KMS provider to be used - - `master_key` (optional): Identifies a KMS-specific key used to encrypt the + :param kms_provider: the KMS provider to be used + :param master_key: Identifies a KMS-specific key used to encrypt the new data key. If the kmsProvider is "local" the `master_key` is not applicable and may be omitted. - - `**kwargs` (optional): additional keyword arguments are the same as "create_collection". + :param kwargs: additional keyword arguments are the same as "create_collection". All optional `create collection command`_ parameters should be passed as keyword arguments to this method. See the documentation for :meth:`~pymongo.database.Database.create_collection` for all valid options. - :Raises: - - :class:`~pymongo.errors.EncryptedCollectionError`: When either data-key creation or creating the collection fails. + :raises: - :class:`~pymongo.errors.EncryptedCollectionError`: When either data-key creation or creating the collection fails. .. versionadded:: 4.4 @@ -693,14 +678,14 @@ def create_data_key( ) -> Binary: """Create and insert a new data key into the key vault collection. - :Parameters: - - `kms_provider`: The KMS provider to use. Supported values are - "aws", "azure", "gcp", "kmip", and "local". - - `master_key`: Identifies a KMS-specific key used to encrypt the + :param kms_provider: The KMS provider to use. Supported values are + "aws", "azure", "gcp", "kmip", "local", or a named provider like + "kmip:name". + :param master_key: Identifies a KMS-specific key used to encrypt the new data key. If the kmsProvider is "local" the `master_key` is not applicable and may be omitted. 
- If the `kms_provider` is "aws" it is required and has the + If the `kms_provider` type is "aws" it is required and has the following fields:: - `region` (string): Required. The AWS region, e.g. "us-east-1". @@ -710,7 +695,7 @@ def create_data_key( requests to. May include port number, e.g. "kms.us-east-1.amazonaws.com:443". - If the `kms_provider` is "azure" it is required and has the + If the `kms_provider` type is "azure" it is required and has the following fields:: - `keyVaultEndpoint` (string): Required. Host with optional @@ -718,7 +703,7 @@ def create_data_key( - `keyName` (string): Required. Key name in the key vault. - `keyVersion` (string): Optional. Version of the key to use. - If the `kms_provider` is "gcp" it is required and has the + If the `kms_provider` type is "gcp" it is required and has the following fields:: - `projectId` (string): Required. The Google cloud project ID. @@ -730,7 +715,7 @@ def create_data_key( - `endpoint` (string): Optional. Host with optional port. Defaults to "cloudkms.googleapis.com". - If the `kms_provider` is "kmip" it is optional and has the + If the `kms_provider` type is "kmip" it is optional and has the following fields:: - `keyId` (string): Optional. `keyId` is the KMIP Unique @@ -740,7 +725,7 @@ def create_data_key( - `endpoint` (string): Optional. Host with optional port, e.g. "example.vault.azure.net:". - - `key_alt_names` (optional): An optional list of string alternate + :param key_alt_names: An optional list of string alternate names used to reference a key. If a key is created with alternate names, then encryption may refer to the key by the unique alternate name instead of by ``key_id``. 
The following example shows creating @@ -750,11 +735,10 @@ def create_data_key( # reference the key with the alternate name client_encryption.encrypt("457-55-5462", key_alt_name="name1", algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random) - - `key_material` (optional): Sets the custom key material to be used + :param key_material: Sets the custom key material to be used by the data key for encryption and decryption. - :Returns: - The ``_id`` of the created data key document as a + :return: The ``_id`` of the created data key document as a :class:`~bson.binary.Binary` with subtype :data:`~bson.binary.UUID_SUBTYPE`. @@ -777,7 +761,7 @@ def _encrypt_helper( self, value: Any, algorithm: str, - key_id: Optional[Binary] = None, + key_id: Optional[Union[Binary, uuid.UUID]] = None, key_alt_name: Optional[str] = None, query_type: Optional[str] = None, contention_factor: Optional[int] = None, @@ -785,6 +769,8 @@ def _encrypt_helper( is_expression: bool = False, ) -> Any: self._check_closed() + if isinstance(key_id, uuid.UUID): + key_id = Binary.from_uuid(key_id) if key_id is not None and not ( isinstance(key_id, Binary) and key_id.subtype == UUID_SUBTYPE ): @@ -817,7 +803,7 @@ def encrypt( self, value: Any, algorithm: str, - key_id: Optional[Binary] = None, + key_id: Optional[Union[Binary, uuid.UUID]] = None, key_alt_name: Optional[str] = None, query_type: Optional[str] = None, contention_factor: Optional[int] = None, @@ -828,23 +814,24 @@ def encrypt( Note that exactly one of ``key_id`` or ``key_alt_name`` must be provided. - :Parameters: - - `value`: The BSON value to encrypt. - - `algorithm` (string): The encryption algorithm to use. See + :param value: The BSON value to encrypt. + :param algorithm` (string): The encryption algorithm to use. See :class:`Algorithm` for some valid options. 
- - `key_id`: Identifies a data key by ``_id`` which must be a + :param key_id: Identifies a data key by ``_id`` which must be a :class:`~bson.binary.Binary` with subtype 4 ( :attr:`~bson.binary.UUID_SUBTYPE`). - - `key_alt_name`: Identifies a key vault document by 'keyAltName'. - - `query_type` (str): The query type to execute. See :class:`QueryType` for valid options. - - `contention_factor` (int): The contention factor to use + :param key_alt_name: Identifies a key vault document by 'keyAltName'. + :param query_type` (str): The query type to execute. See :class:`QueryType` for valid options. + :param contention_factor` (int): The contention factor to use when the algorithm is :attr:`Algorithm.INDEXED`. An integer value *must* be given when the :attr:`Algorithm.INDEXED` algorithm is used. - - `range_opts`: Experimental only, not intended for public use. + :param range_opts: Experimental only, not intended for public use. - :Returns: - The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. + :return: The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. + + .. versionchanged:: 4.7 + ``key_id`` can now be passed in as a :class:`uuid.UUID`. .. versionchanged:: 4.2 Added the `query_type` and `contention_factor` parameters. @@ -867,7 +854,7 @@ def encrypt_expression( self, expression: Mapping[str, Any], algorithm: str, - key_id: Optional[Binary] = None, + key_id: Optional[Union[Binary, uuid.UUID]] = None, key_alt_name: Optional[str] = None, query_type: Optional[str] = None, contention_factor: Optional[int] = None, @@ -878,24 +865,25 @@ def encrypt_expression( Note that exactly one of ``key_id`` or ``key_alt_name`` must be provided. - :Parameters: - - `expression`: The BSON aggregate or match expression to encrypt. - - `algorithm` (string): The encryption algorithm to use. See + :param expression: The BSON aggregate or match expression to encrypt. + :param algorithm` (string): The encryption algorithm to use. 
See :class:`Algorithm` for some valid options. - - `key_id`: Identifies a data key by ``_id`` which must be a + :param key_id: Identifies a data key by ``_id`` which must be a :class:`~bson.binary.Binary` with subtype 4 ( :attr:`~bson.binary.UUID_SUBTYPE`). - - `key_alt_name`: Identifies a key vault document by 'keyAltName'. - - `query_type` (str): The query type to execute. See + :param key_alt_name: Identifies a key vault document by 'keyAltName'. + :param query_type` (str): The query type to execute. See :class:`QueryType` for valid options. - - `contention_factor` (int): The contention factor to use + :param contention_factor` (int): The contention factor to use when the algorithm is :attr:`Algorithm.INDEXED`. An integer value *must* be given when the :attr:`Algorithm.INDEXED` algorithm is used. - - `range_opts`: Experimental only, not intended for public use. + :param range_opts: Experimental only, not intended for public use. + + :return: The encrypted expression, a :class:`~bson.RawBSONDocument`. - :Returns: - The encrypted expression, a :class:`~bson.RawBSONDocument`. + .. versionchanged:: 4.7 + ``key_id`` can now be passed in as a :class:`uuid.UUID`. .. versionadded:: 4.4 """ @@ -916,12 +904,10 @@ def encrypt_expression( def decrypt(self, value: Binary) -> Any: """Decrypt an encrypted value. - :Parameters: - - `value` (Binary): The encrypted value, a + :param value` (Binary): The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. - :Returns: - The decrypted BSON value. + :return: The decrypted BSON value. """ self._check_closed() if not (isinstance(value, Binary) and value.subtype == 6): @@ -935,13 +921,11 @@ def decrypt(self, value: Binary) -> Any: def get_key(self, id: Binary) -> Optional[RawBSONDocument]: """Get a data key by id. - :Parameters: - - `id` (Binary): The UUID of a key a which must be a + :param id` (Binary): The UUID of a key a which must be a :class:`~bson.binary.Binary` with subtype 4 ( :attr:`~bson.binary.UUID_SUBTYPE`). 
- :Returns: - The key document. + :return: The key document. .. versionadded:: 4.2 """ @@ -952,8 +936,7 @@ def get_key(self, id: Binary) -> Optional[RawBSONDocument]: def get_keys(self) -> Cursor[RawBSONDocument]: """Get all of the data keys. - :Returns: - An instance of :class:`~pymongo.cursor.Cursor` over the data key + :return: An instance of :class:`~pymongo.cursor.Cursor` over the data key documents. .. versionadded:: 4.2 @@ -965,13 +948,11 @@ def get_keys(self) -> Cursor[RawBSONDocument]: def delete_key(self, id: Binary) -> DeleteResult: """Delete a key document in the key vault collection that has the given ``key_id``. - :Parameters: - - `id` (Binary): The UUID of a key a which must be a + :param id` (Binary): The UUID of a key a which must be a :class:`~bson.binary.Binary` with subtype 4 ( :attr:`~bson.binary.UUID_SUBTYPE`). - :Returns: - The delete result. + :return: The delete result. .. versionadded:: 4.2 """ @@ -982,14 +963,12 @@ def delete_key(self, id: Binary) -> DeleteResult: def add_key_alt_name(self, id: Binary, key_alt_name: str) -> Any: """Add ``key_alt_name`` to the set of alternate names in the key document with UUID ``key_id``. - :Parameters: - - ``id``: The UUID of a key a which must be a + :param `id`: The UUID of a key a which must be a :class:`~bson.binary.Binary` with subtype 4 ( :attr:`~bson.binary.UUID_SUBTYPE`). - - ``key_alt_name``: The key alternate name to add. + :param `key_alt_name`: The key alternate name to add. - :Returns: - The previous version of the key document. + :return: The previous version of the key document. .. versionadded:: 4.2 """ @@ -1001,11 +980,9 @@ def add_key_alt_name(self, id: Binary, key_alt_name: str) -> Any: def get_key_by_alt_name(self, key_alt_name: str) -> Optional[RawBSONDocument]: """Get a key document in the key vault collection that has the given ``key_alt_name``. - :Parameters: - - `key_alt_name`: (str): The key alternate name of the key to get. 
+ :param key_alt_name: (str): The key alternate name of the key to get. - :Returns: - The key document. + :return: The key document. .. versionadded:: 4.2 """ @@ -1018,14 +995,12 @@ def remove_key_alt_name(self, id: Binary, key_alt_name: str) -> Optional[RawBSON Also removes the ``keyAltNames`` field from the key document if it would otherwise be empty. - :Parameters: - - ``id``: The UUID of a key a which must be a + :param `id`: The UUID of a key a which must be a :class:`~bson.binary.Binary` with subtype 4 ( :attr:`~bson.binary.UUID_SUBTYPE`). - - ``key_alt_name``: The key alternate name to remove. + :param `key_alt_name`: The key alternate name to remove. - :Returns: - Returns the previous version of the key document. + :return: Returns the previous version of the key document. .. versionadded:: 4.2 """ @@ -1059,15 +1034,13 @@ def rewrap_many_data_key( ) -> RewrapManyDataKeyResult: """Decrypts and encrypts all matching data keys in the key vault with a possibly new `master_key` value. - :Parameters: - - `filter`: A document used to filter the data keys. - - `provider`: The new KMS provider to use to encrypt the data keys, + :param filter: A document used to filter the data keys. + :param provider: The new KMS provider to use to encrypt the data keys, or ``None`` to use the current KMS provider(s). - - ``master_key``: The master key fields corresponding to the new KMS + :param `master_key`: The master key fields corresponding to the new KMS provider when ``provider`` is not ``None``. - :Returns: - A :class:`RewrapManyDataKeyResult`. + :return: A :class:`RewrapManyDataKeyResult`. This method allows you to re-encrypt all of your data-keys with a new CMK, or master key. 
Note that this does *not* require re-encrypting any of the data in your encrypted collections, diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 61480467a3..1d5369977c 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -18,7 +18,7 @@ from typing import TYPE_CHECKING, Any, Mapping, Optional try: - import pymongocrypt # noqa: F401 + import pymongocrypt # type:ignore[import] # noqa: F401 _HAVE_PYMONGOCRYPT = True except ImportError: @@ -55,13 +55,13 @@ def __init__( ) -> None: """Options to configure automatic client-side field level encryption. - Automatic client-side field level encryption requires MongoDB 4.2 - enterprise or a MongoDB 4.2 Atlas cluster. Automatic encryption is not + Automatic client-side field level encryption requires MongoDB >=4.2 + enterprise or a MongoDB >=4.2 Atlas cluster. Automatic encryption is not supported for operations on a database or view and will result in error. - Although automatic encryption requires MongoDB 4.2 enterprise or a - MongoDB 4.2 Atlas cluster, automatic *decryption* is supported for all + Although automatic encryption requires MongoDB >=4.2 enterprise or a + MongoDB >=4.2 Atlas cluster, automatic *decryption* is supported for all users. To configure automatic *decryption* without automatic *encryption* set ``bypass_auto_encryption=True``. Explicit encryption and explicit decryption is also supported for all users @@ -69,8 +69,7 @@ def __init__( See :ref:`automatic-client-side-encryption` for an example. - :Parameters: - - `kms_providers`: Map of KMS provider options. The `kms_providers` + :param kms_providers: Map of KMS provider options. The `kms_providers` map values differ by provider: - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. @@ -95,16 +94,27 @@ def __init__( data keys. This key should be generated and stored as securely as possible. - - `key_vault_namespace`: The namespace for the key vault collection. 
+ KMS providers may be specified with an optional name suffix + separated by a colon, for example "kmip:name" or "aws:name". + Named KMS providers do not support :ref:`CSFLE on-demand credentials`. + Named KMS providers enables more than one of each KMS provider type to be configured. + For example, to configure multiple local KMS providers:: + + kms_providers = { + "local": {"key": local_kek1}, # Unnamed KMS provider. + "local:myname": {"key": local_kek2}, # Named KMS provider with name "myname". + } + + :param key_vault_namespace: The namespace for the key vault collection. The key vault collection contains all data keys used for encryption and decryption. Data keys are stored as documents in this MongoDB collection. Data keys are protected with encryption by a KMS provider. - - `key_vault_client` (optional): By default the key vault collection + :param key_vault_client: By default, the key vault collection is assumed to reside in the same MongoDB cluster as the encrypted MongoClient. Use this option to route data key queries to a separate MongoDB cluster. - - `schema_map` (optional): Map of collection namespace ("db.coll") to + :param schema_map: Map of collection namespace ("db.coll") to JSON Schema. By default, a collection's JSONSchema is periodically polled with the listCollections command. But a JSONSchema may be specified locally with the schemaMap option. @@ -119,24 +129,24 @@ def __init__( automatic encryption for client side encryption. Other validation rules in the JSON schema will not be enforced by the driver and will result in an error. - - `bypass_auto_encryption` (optional): If ``True``, automatic + :param bypass_auto_encryption: If ``True``, automatic encryption will be disabled but automatic decryption will still be enabled. Defaults to ``False``. - - `mongocryptd_uri` (optional): The MongoDB URI used to connect + :param mongocryptd_uri: The MongoDB URI used to connect to the *local* mongocryptd process. 
Defaults to ``'mongodb://localhost:27020'``. - - `mongocryptd_bypass_spawn` (optional): If ``True``, the encrypted + :param mongocryptd_bypass_spawn: If ``True``, the encrypted MongoClient will not attempt to spawn the mongocryptd process. Defaults to ``False``. - - `mongocryptd_spawn_path` (optional): Used for spawning the + :param mongocryptd_spawn_path: Used for spawning the mongocryptd process. Defaults to ``'mongocryptd'`` and spawns mongocryptd from the system path. - - `mongocryptd_spawn_args` (optional): A list of string arguments to + :param mongocryptd_spawn_args: A list of string arguments to use when spawning the mongocryptd process. Defaults to ``['--idleShutdownTimeoutSecs=60']``. If the list does not include the ``idleShutdownTimeoutSecs`` option then ``'--idleShutdownTimeoutSecs=60'`` will be added. - - `kms_tls_options` (optional): A map of KMS provider names to TLS + :param kms_tls_options: A map of KMS provider names to TLS options to use when creating secure connections to KMS providers. Accepts the same TLS options as :class:`pymongo.mongo_client.MongoClient`. For example, to @@ -147,14 +157,14 @@ def __init__( Or to supply a client certificate:: kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} - - `crypt_shared_lib_path` (optional): Override the path to load the crypt_shared library. - - `crypt_shared_lib_required` (optional): If True, raise an error if libmongocrypt is + :param crypt_shared_lib_path: Override the path to load the crypt_shared library. + :param crypt_shared_lib_required: If True, raise an error if libmongocrypt is unable to load the crypt_shared library. - - `bypass_query_analysis` (optional): If ``True``, disable automatic analysis + :param bypass_query_analysis: If ``True``, disable automatic analysis of outgoing commands. Set `bypass_query_analysis` to use explicit encryption on indexed fields without the MongoDB Enterprise Advanced licensed crypt_shared library. 
- - `encrypted_fields_map`: Map of collection namespace ("db.coll") to documents + :param encrypted_fields_map: Map of collection namespace ("db.coll") to documents that described the encrypted fields for Queryable Encryption. For example:: { @@ -232,11 +242,10 @@ def __init__( .. note:: This feature is experimental only, and not intended for public use. - :Parameters: - - `sparsity`: An integer. - - `min`: A BSON scalar value corresponding to the type being queried. - - `max`: A BSON scalar value corresponding to the type being queried. - - `precision`: An integer, may only be set for double or decimal128 types. + :param sparsity: An integer. + :param min: A BSON scalar value corresponding to the type being queried. + :param max: A BSON scalar value corresponding to the type being queried. + :param precision: An integer, may only be set for double or decimal128 types. .. versionadded:: 4.4 """ diff --git a/pymongo/errors.py b/pymongo/errors.py index 3e11c1f697..a781e4a016 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -15,6 +15,7 @@ """Exceptions raised by PyMongo.""" from __future__ import annotations +from ssl import SSLCertVerificationError as _CertificateError # noqa: F401 from typing import TYPE_CHECKING, Any, Iterable, Mapping, Optional, Sequence, Union from bson.errors import InvalidDocument @@ -22,17 +23,6 @@ if TYPE_CHECKING: from pymongo.typings import _DocumentOut -try: - # CPython 3.7+ - from ssl import SSLCertVerificationError as _CertificateError -except ImportError: - try: - from ssl import CertificateError as _CertificateError - except ImportError: - - class _CertificateError(ValueError): # type: ignore - pass - class PyMongoError(Exception): """Base class for all PyMongo exceptions.""" diff --git a/pymongo/hello.py b/pymongo/hello.py index d38c285ab7..0f6d7a399a 100644 --- a/pymongo/hello.py +++ b/pymongo/hello.py @@ -218,3 +218,7 @@ def service_id(self) -> Optional[ObjectId]: @property def hello_ok(self) -> bool: return 
self._doc.get("helloOk", False) + + @property + def connection_id(self) -> Optional[int]: + return self._doc.get("connectionId") diff --git a/pymongo/helpers.py b/pymongo/helpers.py index cd7d434b08..080c3204a4 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -33,7 +33,6 @@ cast, ) -from bson.son import SON from pymongo import ASCENDING from pymongo.errors import ( CursorNotFound, @@ -84,12 +83,31 @@ 89, # NetworkTimeout 9001, # SocketException 262, # ExceededTimeLimit + 134, # ReadConcernMajorityNotAvailableYet ] ) # Server code raised when re-authentication is required _REAUTHENTICATION_REQUIRED_CODE: int = 391 +# Server code raised when authentication fails. +_AUTHENTICATION_FAILURE_CODE: int = 18 + +# Note - to avoid bugs from forgetting which if these is all lowercase and +# which are camelCase, and at the same time avoid having to add a test for +# every command, use all lowercase here and test against command_name.lower(). +_SENSITIVE_COMMANDS: set = { + "authenticate", + "saslstart", + "saslcontinue", + "getnonce", + "createuser", + "updateuser", + "copydbgetnonce", + "copydbsaslstart", + "copydb", +} + def _gen_index_name(keys: _IndexList) -> str: """Generate an index name from the set of fields it is over.""" @@ -124,7 +142,7 @@ def _index_list( return values -def _index_document(index_list: _IndexList) -> SON[str, Any]: +def _index_document(index_list: _IndexList) -> dict[str, Any]: """Helper to generate an index specifying document. Takes a list of (key, direction) pairs. @@ -136,7 +154,7 @@ def _index_document(index_list: _IndexList) -> SON[str, Any]: if not len(index_list): raise ValueError("key_or_list must not be empty") - index: SON[str, Any] = SON() + index: dict[str, Any] = {} if isinstance(index_list, abc.Mapping): for key in index_list: @@ -254,6 +272,8 @@ def _get_wce_doc(result: Mapping[str, Any]) -> Optional[Mapping[str, Any]]: # convenient to attach it to the writeConcernError doc itself. 
error_labels = result.get("errorLabels") if error_labels: + # Copy to avoid changing the original document. + wce = wce.copy() wce["errorLabels"] = error_labels return wce diff --git a/pymongo/logger.py b/pymongo/logger.py new file mode 100644 index 0000000000..2caafa778d --- /dev/null +++ b/pymongo/logger.py @@ -0,0 +1,169 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import enum +import logging +import os +import warnings +from typing import Any + +from bson import UuidRepresentation, json_util +from bson.json_util import JSONOptions, _truncate_documents +from pymongo.monitoring import ConnectionCheckOutFailedReason, ConnectionClosedReason + + +class _CommandStatusMessage(str, enum.Enum): + STARTED = "Command started" + SUCCEEDED = "Command succeeded" + FAILED = "Command failed" + + +class _ServerSelectionStatusMessage(str, enum.Enum): + STARTED = "Server selection started" + SUCCEEDED = "Server selection succeeded" + FAILED = "Server selection failed" + WAITING = "Waiting for suitable server to become available" + + +class _ConnectionStatusMessage(str, enum.Enum): + POOL_CREATED = "Connection pool created" + POOL_READY = "Connection pool ready" + POOL_CLOSED = "Connection pool closed" + POOL_CLEARED = "Connection pool cleared" + + CONN_CREATED = "Connection created" + CONN_READY = "Connection ready" + CONN_CLOSED = "Connection closed" + + CHECKOUT_STARTED = "Connection checkout 
started" + CHECKOUT_SUCCEEDED = "Connection checked out" + CHECKOUT_FAILED = "Connection checkout failed" + CHECKEDIN = "Connection checked in" + + +_DEFAULT_DOCUMENT_LENGTH = 1000 +_SENSITIVE_COMMANDS = [ + "authenticate", + "saslStart", + "saslContinue", + "getnonce", + "createUser", + "updateUser", + "copydbgetnonce", + "copydbsaslstart", + "copydb", +] +_HELLO_COMMANDS = ["hello", "ismaster", "isMaster"] +_REDACTED_FAILURE_FIELDS = ["code", "codeName", "errorLabels"] +_DOCUMENT_NAMES = ["command", "reply", "failure"] +_JSON_OPTIONS = JSONOptions(uuid_representation=UuidRepresentation.STANDARD) +_COMMAND_LOGGER = logging.getLogger("pymongo.command") +_CONNECTION_LOGGER = logging.getLogger("pymongo.connection") +_SERVER_SELECTION_LOGGER = logging.getLogger("pymongo.serverSelection") +_CLIENT_LOGGER = logging.getLogger("pymongo.client") +_VERBOSE_CONNECTION_ERROR_REASONS = { + ConnectionClosedReason.POOL_CLOSED: "Connection pool was closed", + ConnectionCheckOutFailedReason.POOL_CLOSED: "Connection pool was closed", + ConnectionClosedReason.STALE: "Connection pool was stale", + ConnectionClosedReason.ERROR: "An error occurred while using the connection", + ConnectionCheckOutFailedReason.CONN_ERROR: "An error occurred while trying to establish a new connection", + ConnectionClosedReason.IDLE: "Connection was idle too long", + ConnectionCheckOutFailedReason.TIMEOUT: "Connection exceeded the specified timeout", +} + + +def _debug_log(logger: logging.Logger, **fields: Any) -> None: + logger.debug(LogMessage(**fields)) + + +def _verbose_connection_error_reason(reason: str) -> str: + return _VERBOSE_CONNECTION_ERROR_REASONS.get(reason, reason) + + +def _info_log(logger: logging.Logger, **fields: Any) -> None: + logger.info(LogMessage(**fields)) + + +def _log_or_warn(logger: logging.Logger, message: str) -> None: + if logger.isEnabledFor(logging.INFO): + logger.info(message) + else: + # stacklevel=4 ensures that the warning is for the user's code. 
+ warnings.warn(message, UserWarning, stacklevel=4) + + +class LogMessage: + __slots__ = ("_kwargs", "_redacted") + + def __init__(self, **kwargs: Any): + self._kwargs = kwargs + self._redacted = False + + def __str__(self) -> str: + self._redact() + return "%s" % ( + json_util.dumps( + self._kwargs, json_options=_JSON_OPTIONS, default=lambda o: o.__repr__() + ) + ) + + def _is_sensitive(self, doc_name: str) -> bool: + is_speculative_authenticate = ( + self._kwargs.pop("speculative_authenticate", False) + or "speculativeAuthenticate" in self._kwargs[doc_name] + ) + is_sensitive_command = ( + "commandName" in self._kwargs and self._kwargs["commandName"] in _SENSITIVE_COMMANDS + ) + + is_sensitive_hello = ( + self._kwargs["commandName"] in _HELLO_COMMANDS and is_speculative_authenticate + ) + + return is_sensitive_command or is_sensitive_hello + + def _redact(self) -> None: + if self._redacted: + return + self._kwargs = {k: v for k, v in self._kwargs.items() if v is not None} + if "durationMS" in self._kwargs and hasattr(self._kwargs["durationMS"], "total_seconds"): + self._kwargs["durationMS"] = self._kwargs["durationMS"].total_seconds() * 1000 + if "serviceId" in self._kwargs: + self._kwargs["serviceId"] = str(self._kwargs["serviceId"]) + document_length = int(os.getenv("MONGOB_LOG_MAX_DOCUMENT_LENGTH", _DEFAULT_DOCUMENT_LENGTH)) + if document_length < 0: + document_length = _DEFAULT_DOCUMENT_LENGTH + is_server_side_error = self._kwargs.pop("isServerSideError", False) + + for doc_name in _DOCUMENT_NAMES: + doc = self._kwargs.get(doc_name) + if doc: + if doc_name == "failure" and is_server_side_error: + doc = {k: v for k, v in doc.items() if k in _REDACTED_FAILURE_FIELDS} + if doc_name != "failure" and self._is_sensitive(doc_name): + doc = json_util.dumps({}) + else: + truncated_doc = _truncate_documents(doc, document_length)[0] + doc = json_util.dumps( + truncated_doc, + json_options=_JSON_OPTIONS, + default=lambda o: o.__repr__(), + ) + if len(doc) > 
document_length: + doc = ( + doc.encode()[:document_length].decode("unicode-escape", "ignore") + ) + "..." + self._kwargs[doc_name] = doc + self._redacted = True diff --git a/pymongo/message.py b/pymongo/message.py index c04f4a8874..9412dc9149 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -22,6 +22,7 @@ from __future__ import annotations import datetime +import logging import random import struct from io import BytesIO as _BytesIO @@ -35,7 +36,6 @@ NoReturn, Optional, Union, - cast, ) import bson @@ -47,7 +47,6 @@ RawBSONDocument, _inflate_bson, ) -from bson.son import SON try: from pymongo import _cmessage # type: ignore[attr-defined] @@ -67,6 +66,7 @@ ) from pymongo.hello import HelloCompat from pymongo.helpers import _handle_reauth +from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern @@ -129,7 +129,7 @@ def _maybe_add_read_preference( # the secondaryOkay bit has the same effect). 
if mode and (mode != ReadPreference.SECONDARY_PREFERRED.mode or len(document) > 1): if "$query" not in spec: - spec = SON([("$query", spec)]) + spec = {"$query": spec} spec["$readPreference"] = document return spec @@ -175,33 +175,29 @@ def _convert_write_result( return res -_OPTIONS = SON( - [ - ("tailable", 2), - ("oplogReplay", 8), - ("noCursorTimeout", 16), - ("awaitData", 32), - ("allowPartialResults", 128), - ] -) +_OPTIONS = { + "tailable": 2, + "oplogReplay": 8, + "noCursorTimeout": 16, + "awaitData": 32, + "allowPartialResults": 128, +} -_MODIFIERS = SON( - [ - ("$query", "filter"), - ("$orderby", "sort"), - ("$hint", "hint"), - ("$comment", "comment"), - ("$maxScan", "maxScan"), - ("$maxTimeMS", "maxTimeMS"), - ("$max", "max"), - ("$min", "min"), - ("$returnKey", "returnKey"), - ("$showRecordId", "showRecordId"), - ("$showDiskLoc", "showRecordId"), # <= MongoDb 3.0 - ("$snapshot", "snapshot"), - ] -) +_MODIFIERS = { + "$query": "filter", + "$orderby": "sort", + "$hint": "hint", + "$comment": "comment", + "$maxScan": "maxScan", + "$maxTimeMS": "maxTimeMS", + "$max": "max", + "$min": "min", + "$returnKey": "returnKey", + "$showRecordId": "showRecordId", + "$showDiskLoc": "showRecordId", # <= MongoDb 3.0 + "$snapshot": "snapshot", +} def _gen_find_command( @@ -216,9 +212,9 @@ def _gen_find_command( collation: Optional[Mapping[str, Any]] = None, session: Optional[ClientSession] = None, allow_disk_use: Optional[bool] = None, -) -> SON[str, Any]: +) -> dict[str, Any]: """Generate a find command document.""" - cmd: SON[str, Any] = SON([("find", coll)]) + cmd: dict[str, Any] = {"find": coll} if "$query" in spec: cmd.update( [ @@ -262,9 +258,9 @@ def _gen_get_more_command( max_await_time_ms: Optional[int], comment: Optional[Any], conn: Connection, -) -> SON[str, Any]: +) -> dict[str, Any]: """Generate a getMore command document.""" - cmd: SON[str, Any] = SON([("getMore", cursor_id), ("collection", coll)]) + cmd: dict[str, Any] = {"getMore": cursor_id, 
"collection": coll} if batch_size: cmd["batchSize"] = batch_size if max_await_time_ms is not None: @@ -337,7 +333,7 @@ def __init__( self.client = client self.allow_disk_use = allow_disk_use self.name = "find" - self._as_command: Optional[tuple[SON[str, Any], str]] = None + self._as_command: Optional[tuple[dict[str, Any], str]] = None self.exhaust = exhaust def reset(self) -> None: @@ -364,7 +360,7 @@ def use_command(self, conn: Connection) -> bool: def as_command( self, conn: Connection, apply_timeout: bool = False - ) -> tuple[SON[str, Any], str]: + ) -> tuple[dict[str, Any], str]: """Return a find command document for this query.""" # We use the command twice: on the wire and for command monitoring. # Generate it once, for speed and to avoid repeating side-effects. @@ -372,7 +368,7 @@ def as_command( return self._as_command explain = "$explain" in self.spec - cmd: SON[str, Any] = _gen_find_command( + cmd: dict[str, Any] = _gen_find_command( self.coll, self.spec, self.fields, @@ -387,7 +383,7 @@ def as_command( ) if explain: self.name = "explain" - cmd = SON([("explain", cmd)]) + cmd = {"explain": cmd} session = self.session conn.add_server_api(cmd) if session: @@ -399,7 +395,7 @@ def as_command( # Support auto encryption client = self.client if client._encrypter and not client._encrypter._bypass_auto_encryption: - cmd = cast(SON[str, Any], client._encrypter.encrypt(self.db, cmd, self.codec_options)) + cmd = client._encrypter.encrypt(self.db, cmd, self.codec_options) # Support CSOT if apply_timeout: conn.apply_timeout(client, cmd) @@ -505,7 +501,7 @@ def __init__( self.client = client self.max_await_time_ms = max_await_time_ms self.conn_mgr = conn_mgr - self._as_command: Optional[tuple[SON[str, Any], str]] = None + self._as_command: Optional[tuple[dict[str, Any], str]] = None self.exhaust = exhaust self.comment = comment @@ -528,13 +524,13 @@ def use_command(self, conn: Connection) -> bool: def as_command( self, conn: Connection, apply_timeout: bool = False - ) 
-> tuple[SON[str, Any], str]: + ) -> tuple[dict[str, Any], str]: """Return a getMore command document for this query.""" # See _Query.as_command for an explanation of this caching. if self._as_command is not None: return self._as_command - cmd: SON[str, Any] = _gen_get_more_command( + cmd: dict[str, Any] = _gen_get_more_command( self.cursor_id, self.coll, self.ntoreturn, @@ -549,7 +545,7 @@ def as_command( # Support auto encryption client = self.client if client._encrypter and not client._encrypter._bypass_auto_encryption: - cmd = cast(SON[str, Any], client._encrypter.encrypt(self.db, cmd, self.codec_options)) + cmd = client._encrypter.encrypt(self.db, cmd, self.codec_options) # Support CSOT if apply_timeout: conn.apply_timeout(client, cmd=None) @@ -940,7 +936,7 @@ def __init__( self.publish = listeners.enabled_for_commands self.name = cmd_name self.field = _FIELD_MAP[self.name] - self.start_time = datetime.datetime.now() if self.publish else None + self.start_time = datetime.datetime.now() self.session = session self.compress = bool(conn.compression_context) self.op_type = op_type @@ -961,7 +957,7 @@ def execute( self, cmd: MutableMapping[str, Any], docs: list[Mapping[str, Any]], client: MongoClient ) -> tuple[Mapping[str, Any], list[Mapping[str, Any]]]: request_id, msg, to_send = self.__batch_command(cmd, docs) - result = self.write_command(cmd, request_id, msg, to_send) + result = self.write_command(cmd, request_id, msg, to_send, client) client._process_response(result, self.session) return result, to_send @@ -974,7 +970,7 @@ def execute_unack( # without receiving a result. Send 0 for max_doc_size # to disable size checking. Size checking is handled while # the documents are encoded to BSON. 
- self.unack_write(cmd, request_id, msg, 0, to_send) + self.unack_write(cmd, request_id, msg, 0, to_send, client) return to_send @property @@ -1007,33 +1003,82 @@ def unack_write( msg: bytes, max_doc_size: int, docs: list[Mapping[str, Any]], + client: MongoClient, ) -> Optional[Mapping[str, Any]]: """A proxy for Connection.unack_write that handles event publishing.""" + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.STARTED, + command=cmd, + commandName=next(iter(cmd)), + databaseName=self.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=self.conn.id, + serverConnectionId=self.conn.server_connection_id, + serverHost=self.conn.address[0], + serverPort=self.conn.address[1], + serviceId=self.conn.service_id, + ) if self.publish: - assert self.start_time is not None - duration = datetime.datetime.now() - self.start_time cmd = self._start(cmd, request_id, docs) - start = datetime.datetime.now() try: result = self.conn.unack_write(msg, max_doc_size) # type: ignore[func-returns-value] + duration = datetime.datetime.now() - self.start_time + if result is not None: + reply = _convert_write_result(self.name, cmd, result) + else: + # Comply with APM spec. 
+ reply = {"ok": 1} + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.SUCCEEDED, + durationMS=duration, + reply=reply, + commandName=next(iter(cmd)), + databaseName=self.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=self.conn.id, + serverConnectionId=self.conn.server_connection_id, + serverHost=self.conn.address[0], + serverPort=self.conn.address[1], + serviceId=self.conn.service_id, + ) if self.publish: - duration = (datetime.datetime.now() - start) + duration - if result is not None: - reply = _convert_write_result(self.name, cmd, result) - else: - # Comply with APM spec. - reply = {"ok": 1} self._succeed(request_id, reply, duration) except Exception as exc: + duration = datetime.datetime.now() - self.start_time + if isinstance(exc, OperationFailure): + failure: _DocumentOut = _convert_write_result(self.name, cmd, exc.details) # type: ignore[arg-type] + elif isinstance(exc, NotPrimaryError): + failure = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.FAILED, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=self.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=self.conn.id, + serverConnectionId=self.conn.server_connection_id, + serverHost=self.conn.address[0], + serverPort=self.conn.address[1], + serviceId=self.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) if self.publish: assert self.start_time is not None - duration = (datetime.datetime.now() - start) + duration - if isinstance(exc, OperationFailure): - failure: _DocumentOut = _convert_write_result(self.name, cmd, exc.details) # type: ignore[arg-type] - elif 
isinstance(exc, NotPrimaryError): - failure = exc.details # type: ignore[assignment] - else: - failure = _convert_exception(exc) self._fail(request_id, failure, duration) raise finally: @@ -1047,25 +1092,76 @@ def write_command( request_id: int, msg: bytes, docs: list[Mapping[str, Any]], + client: MongoClient, ) -> dict[str, Any]: """A proxy for SocketInfo.write_command that handles event publishing.""" + cmd[self.field] = docs + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.STARTED, + command=cmd, + commandName=next(iter(cmd)), + databaseName=self.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=self.conn.id, + serverConnectionId=self.conn.server_connection_id, + serverHost=self.conn.address[0], + serverPort=self.conn.address[1], + serviceId=self.conn.service_id, + ) if self.publish: - assert self.start_time is not None - duration = datetime.datetime.now() - self.start_time self._start(cmd, request_id, docs) - start = datetime.datetime.now() try: reply = self.conn.write_command(request_id, msg, self.codec) + duration = datetime.datetime.now() - self.start_time + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.SUCCEEDED, + durationMS=duration, + reply=reply, + commandName=next(iter(cmd)), + databaseName=self.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=self.conn.id, + serverConnectionId=self.conn.server_connection_id, + serverHost=self.conn.address[0], + serverPort=self.conn.address[1], + serviceId=self.conn.service_id, + ) if self.publish: - duration = (datetime.datetime.now() - start) + duration self._succeed(request_id, reply, duration) except Exception as exc: + duration = datetime.datetime.now() - self.start_time + if isinstance(exc, (NotPrimaryError, 
OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.FAILED, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=self.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=self.conn.id, + serverConnectionId=self.conn.server_connection_id, + serverHost=self.conn.address[0], + serverPort=self.conn.address[1], + serviceId=self.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + if self.publish: - duration = (datetime.datetime.now() - start) + duration - if isinstance(exc, (NotPrimaryError, OperationFailure)): - failure: _DocumentOut = exc.details # type: ignore[assignment] - else: - failure = _convert_exception(exc) self._fail(request_id, failure, duration) raise finally: @@ -1082,6 +1178,7 @@ def _start( self.db_name, request_id, self.conn.address, + self.conn.server_connection_id, self.op_id, self.conn.service_id, ) @@ -1095,6 +1192,7 @@ def _succeed(self, request_id: int, reply: _DocumentOut, duration: timedelta) -> self.name, request_id, self.conn.address, + self.conn.server_connection_id, self.op_id, self.conn.service_id, database_name=self.db_name, @@ -1108,6 +1206,7 @@ def _fail(self, request_id: int, failure: _DocumentOut, duration: timedelta) -> self.name, request_id, self.conn.address, + self.conn.server_connection_id, self.op_id, self.conn.service_id, database_name=self.db_name, @@ -1126,7 +1225,7 @@ class _EncryptedBulkWriteContext(_BulkWriteContext): def __batch_command( self, cmd: MutableMapping[str, Any], docs: list[Mapping[str, Any]] - ) -> tuple[MutableMapping[str, Any], list[Mapping[str, Any]]]: + ) -> tuple[dict[str, Any], list[Mapping[str, Any]]]: namespace = self.db_name + ".$cmd" msg, to_send = 
_encode_batched_write_command( namespace, self.op_type, cmd, docs, self.codec, self @@ -1475,8 +1574,7 @@ def raw_response( Can raise CursorNotFound, NotPrimaryError, ExecutionTimeout, or OperationFailure. - :Parameters: - - `cursor_id` (optional): cursor_id we sent to get this response - + :param cursor_id: cursor_id we sent to get this response - used for raising an informative exception when we get cursor id not valid at server response. """ @@ -1525,13 +1623,12 @@ def unpack_response( Can raise CursorNotFound, NotPrimaryError, ExecutionTimeout, or OperationFailure. - :Parameters: - - `cursor_id` (optional): cursor_id we sent to get this response - + :param cursor_id: cursor_id we sent to get this response - used for raising an informative exception when we get cursor id not valid at server response - - `codec_options` (optional): an instance of + :param codec_options: an instance of :class:`~bson.codec_options.CodecOptions` - - `user_fields` (optional): Response fields that should be decoded + :param user_fields: Response fields that should be decoded using the TypeDecoders from codec_options, passed to bson._decode_all_selective. """ @@ -1606,11 +1703,10 @@ def unpack_response( ) -> list[dict[str, Any]]: """Unpack a OP_MSG command response. - :Parameters: - - `cursor_id` (optional): Ignored, for compatibility with _OpReply. - - `codec_options` (optional): an instance of + :param cursor_id: Ignored, for compatibility with _OpReply. + :param codec_options: an instance of :class:`~bson.codec_options.CodecOptions` - - `user_fields` (optional): Response fields that should be decoded + :param user_fields: Response fields that should be decoded using the TypeDecoders from codec_options, passed to bson._decode_all_selective. 
""" diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 5d3cfcd832..89d61500ca 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -55,9 +55,7 @@ cast, ) -import bson -from bson.codec_options import DEFAULT_CODEC_OPTIONS, TypeRegistry -from bson.son import SON +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, TypeRegistry from bson.timestamp import Timestamp from pymongo import ( _csot, @@ -84,9 +82,12 @@ PyMongoError, ServerSelectionTimeoutError, WaitQueueTimeoutError, + WriteConcernError, ) from pymongo.lock import _HAS_REGISTER_AT_FORK, _create_lock, _release_locks -from pymongo.pool import ConnectionClosedReason +from pymongo.logger import _CLIENT_LOGGER, _log_or_warn +from pymongo.monitoring import ConnectionClosedReason +from pymongo.operations import _Op from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.server_selectors import writable_server_selector from pymongo.server_type import SERVER_TYPE @@ -246,28 +247,27 @@ def __init__( URI or keyword parameters. If the same option is passed in a URI and as a keyword parameter the keyword parameter takes precedence. - :Parameters: - - `host` (optional): hostname or IP address or Unix domain socket + :param host: hostname or IP address or Unix domain socket path of a single mongod or mongos instance to connect to, or a mongodb URI, or a list of hostnames (but no more than one mongodb URI). If `host` is an IPv6 literal it must be enclosed in '[' and ']' characters following the RFC2732 URL syntax (e.g. '[::1]' for localhost). Multihomed and round robin DNS addresses are **not** supported. 
- - `port` (optional): port number on which to connect - - `document_class` (optional): default class to use for + :param port: port number on which to connect + :param document_class: default class to use for documents returned from queries on this client - - `tz_aware` (optional): if ``True``, + :param tz_aware: if ``True``, :class:`~datetime.datetime` instances returned as values in a document by this :class:`MongoClient` will be timezone aware (otherwise they will be naive) - - `connect` (optional): if ``True`` (the default), immediately + :param connect: if ``True`` (the default), immediately begin connecting to MongoDB in the background. Otherwise connect on the first operation. - - `type_registry` (optional): instance of + :param type_registry: instance of :class:`~bson.codec_options.TypeRegistry` to enable encoding and decoding of custom types. - - `datetime_conversion`: Specifies how UTC datetimes should be decoded + :param datetime_conversion: Specifies how UTC datetimes should be decoded within BSON. Valid options include 'datetime_ms' to return as a DatetimeMS, 'datetime' to return as a datetime.datetime and raising a ValueError for out-of-range values, 'datetime_auto' to @@ -438,8 +438,8 @@ def __init__( primary (e.g. w=3 means write to the primary and wait until replicated to **two** secondaries). Passing w=0 **disables write acknowledgement** and all other write concern options. - - `wTimeoutMS`: (integer) Used in conjunction with `w`. Specify a value - in milliseconds to control how long to wait for write propagation + - `wTimeoutMS`: **DEPRECATED** (integer) Used in conjunction with `w`. + Specify a value in milliseconds to control how long to wait for write propagation to complete. If replication does not complete in the given timeframe, a timeout exception is raised. Passing wTimeoutMS=0 will cause **write operations to wait indefinitely**. @@ -716,6 +716,9 @@ def __init__( Not:: client.__my_database__ + + .. 
versionchanged:: 4.7 + Deprecated parameter ``wTimeoutMS``, use :meth:`~pymongo.timeout`. """ doc_class = document_class or dict self.__init_kwargs: dict[str, Any] = { @@ -789,6 +792,10 @@ def __init__( if not seeds: raise ConfigurationError("need to specify at least one host") + for hostname in [node[0] for node in seeds]: + if _detect_external_db(hostname): + break + # Add options with named keyword arguments to the parsed kwarg options. if type_registry is not None: keyword_opts["type_registry"] = type_registry @@ -855,6 +862,7 @@ def __init__( server_monitoring_mode=options.server_monitoring_mode, ) + self._opened = False self._init_background() if connect: @@ -872,8 +880,11 @@ def __init__( # This will be used later if we fork. MongoClient._clients[self._topology._topology_id] = self - def _init_background(self) -> None: + def _init_background(self, old_pid: Optional[int] = None) -> None: self._topology = Topology(self._topology_settings) + # Seed the topology with the old one's pid so we can detect clients + # that are opened before a fork and used after. + self._topology._pid = old_pid def target() -> bool: client = self_ref() @@ -893,10 +904,13 @@ def target() -> bool: # this closure. When the client is freed, stop the executor soon. self_ref: Any = weakref.ref(self, executor.close) self._kill_cursors_executor = executor + self._opened = False def _after_fork(self) -> None: """Resets topology in a child after successfully forking.""" - self._init_background() + self._init_background(self._topology._pid) + # Reset the session pool to avoid duplicate sessions in the child process. + self._topology._session_pool.reset() def _duplicate(self, **kwargs: Any) -> MongoClient: args = self.__init_kwargs.copy() @@ -914,7 +928,7 @@ def _server_property(self, attr_name: str) -> Any: the server may change. In such cases, store a local reference to a ServerDescription first, then use its properties. 
""" - server = self._topology.select_server(writable_server_selector) + server = self._get_topology().select_server(writable_server_selector, _Op.TEST) return getattr(server.description, attr_name) @@ -971,47 +985,45 @@ def watch( For a precise description of the resume process see the `change streams specification`_. - :Parameters: - - `pipeline` (optional): A list of aggregation pipeline stages to + :param pipeline: A list of aggregation pipeline stages to append to an initial ``$changeStream`` stage. Not all pipeline stages are valid after a ``$changeStream`` stage, see the MongoDB documentation on change streams for the supported stages. - - `full_document` (optional): The fullDocument to pass as an option + :param full_document: The fullDocument to pass as an option to the ``$changeStream`` stage. Allowed values: 'updateLookup', 'whenAvailable', 'required'. When set to 'updateLookup', the change notification for partial updates will include both a delta describing the changes to the document, as well as a copy of the entire document that was changed from some time after the change occurred. - - `full_document_before_change`: Allowed values: 'whenAvailable' + :param full_document_before_change: Allowed values: 'whenAvailable' and 'required'. Change events may now result in a 'fullDocumentBeforeChange' response field. - - `resume_after` (optional): A resume token. If provided, the + :param resume_after: A resume token. If provided, the change stream will start returning changes that occur directly after the operation specified in the resume token. A resume token is the _id value of a change document. - - `max_await_time_ms` (optional): The maximum time in milliseconds + :param max_await_time_ms: The maximum time in milliseconds for the server to wait for changes before responding to a getMore operation. - - `batch_size` (optional): The maximum number of documents to return + :param batch_size: The maximum number of documents to return per batch. 
- - `collation` (optional): The :class:`~pymongo.collation.Collation` + :param collation: The :class:`~pymongo.collation.Collation` to use for the aggregation. - - `start_at_operation_time` (optional): If provided, the resulting + :param start_at_operation_time: If provided, the resulting change stream will only return changes that occurred at or after the specified :class:`~bson.timestamp.Timestamp`. Requires MongoDB >= 4.0. - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `start_after` (optional): The same as `resume_after` except that + :param start_after: The same as `resume_after` except that `start_after` can resume notifications after an invalidate event. This option and `resume_after` are mutually exclusive. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `show_expanded_events` (optional): Include expanded events such as DDL events like `dropIndexes`. + :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. - :Returns: - A :class:`~pymongo.change_stream.ClusterChangeStream` cursor. + :return: A :class:`~pymongo.change_stream.ClusterChangeStream` cursor. .. versionchanged:: 4.3 Added `show_expanded_events` parameter. @@ -1030,7 +1042,7 @@ def watch( .. seealso:: The MongoDB documentation on `changeStreams `_. .. _change streams specification: - https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst + https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md """ return ClusterChangeStream( self.admin, @@ -1062,8 +1074,7 @@ def topology_description(self) -> TopologyDescription: to get a more recent :class:`~pymongo.topology_description.TopologyDescription`. - :Returns: - An instance of + :return: An instance of :class:`~pymongo.topology_description.TopologyDescription`. .. 
versionadded:: 4.0 @@ -1175,8 +1186,7 @@ def nodes(self) -> FrozenSet[_Address]: def options(self) -> ClientOptions: """The configuration options for this client. - :Returns: - An instance of :class:`~pymongo.client_options.ClientOptions`. + :return: An instance of :class:`~pymongo.client_options.ClientOptions`. .. versionadded:: 4.0 """ @@ -1187,7 +1197,9 @@ def _end_sessions(self, session_ids: list[_ServerSession]) -> None: try: # Use Connection.command directly to avoid implicitly creating # another session. - with self._conn_for_reads(ReadPreference.PRIMARY_PREFERRED, None) as ( + with self._conn_for_reads( + ReadPreference.PRIMARY_PREFERRED, None, operation=_Op.END_SESSIONS + ) as ( conn, read_pref, ): @@ -1195,7 +1207,7 @@ def _end_sessions(self, session_ids: list[_ServerSession]) -> None: return for i in range(0, len(session_ids), common._MAX_END_SESSIONS): - spec = SON([("endSessions", session_ids[i : i + common._MAX_END_SESSIONS])]) + spec = {"endSessions": session_ids[i : i + common._MAX_END_SESSIONS]} conn.command("admin", spec, read_preference=read_pref, client=self) except PyMongoError: # Drivers MUST ignore any errors returned by the endSessions @@ -1235,9 +1247,11 @@ def _get_topology(self) -> Topology: If this client was created with "connect=False", calling _get_topology launches the connection process in the background. """ - self._topology.open() - with self.__lock: - self._kill_cursors_executor.open() + if not self._opened: + self._topology.open() + with self.__lock: + self._kill_cursors_executor.open() + self._opened = True return self._topology @contextlib.contextmanager @@ -1276,16 +1290,19 @@ def _select_server( self, server_selector: Callable[[Selection], Selection], session: Optional[ClientSession], + operation: str, address: Optional[_Address] = None, + deprioritized_servers: Optional[list[Server]] = None, + operation_id: Optional[int] = None, ) -> Server: """Select a server to run an operation on this client. 
- :Parameters: - - `server_selector`: The server selector to use if the session is + :param server_selector: The server selector to use if the session is not pinned and no address is given. - - `session`: The ClientSession for the next operation, or None. May + :param session: The ClientSession for the next operation, or None. May be pinned to a mongos server address. - - `address` (optional): Address when sending a message + :param operation: The name of the operation that the server is being selected for. + :param address: Address when sending a message to a specific server, used for getMore. """ try: @@ -1296,11 +1313,18 @@ def _select_server( address = session._pinned_address if address: # We're running a getMore or this session is pinned to a mongos. - server = topology.select_server_by_address(address) + server = topology.select_server_by_address( + address, operation, operation_id=operation_id + ) if not server: raise AutoReconnect("server %s:%s no longer available" % address) # noqa: UP031 else: - server = topology.select_server(server_selector) + server = topology.select_server( + server_selector, + operation, + deprioritized_servers=deprioritized_servers, + operation_id=operation_id, + ) return server except PyMongoError as exc: # Server selection errors in a transaction are transient. @@ -1309,8 +1333,10 @@ def _select_server( session._unpin() raise - def _conn_for_writes(self, session: Optional[ClientSession]) -> ContextManager[Connection]: - server = self._select_server(writable_server_selector, session) + def _conn_for_writes( + self, session: Optional[ClientSession], operation: str + ) -> ContextManager[Connection]: + server = self._select_server(writable_server_selector, session, operation) return self._checkout(server, session) @contextlib.contextmanager @@ -1324,8 +1350,9 @@ def _conn_from_server( # always send primaryPreferred when directly connected to a repl set # member. # Thread safe: if the type is single it cannot change. 
- topology = self._get_topology() - single = topology.description.topology_type == TOPOLOGY_TYPE.Single + # NOTE: We already opened the Topology when selecting a server so there's no need + # to call _get_topology() again. + single = self._topology.description.topology_type == TOPOLOGY_TYPE.Single with self._checkout(server, session) as conn: if single: @@ -1339,11 +1366,13 @@ def _conn_from_server( yield conn, read_preference def _conn_for_reads( - self, read_preference: _ServerMode, session: Optional[ClientSession] + self, + read_preference: _ServerMode, + session: Optional[ClientSession], + operation: str, ) -> ContextManager[tuple[Connection, _ServerMode]]: assert read_preference is not None, "read_preference must not be None" - _ = self._get_topology() - server = self._select_server(read_preference, session) + server = self._select_server(read_preference, session, operation) return self._conn_from_server(read_preference, server, session) def _should_pin_cursor(self, session: Optional[ClientSession]) -> Optional[bool]: @@ -1358,15 +1387,17 @@ def _run_operation( ) -> Response: """Run a _Query/_GetMore operation and return a Response. - :Parameters: - - `operation`: a _Query or _GetMore object. - - `unpack_res`: A callable that decodes the wire protocol response. - - `address` (optional): Optional address when sending a message + :param operation: a _Query or _GetMore object. + :param unpack_res: A callable that decodes the wire protocol response. + :param address: Optional address when sending a message to a specific server, used for getMore. 
""" if operation.conn_mgr: server = self._select_server( - operation.read_preference, operation.session, address=address + operation.read_preference, + operation.session, + operation.name, + address=address, ) with operation.conn_mgr.lock: @@ -1378,6 +1409,7 @@ def _run_operation( operation.read_preference, self._event_listeners, unpack_res, + self, ) def _cmd( @@ -1388,7 +1420,12 @@ def _cmd( ) -> Response: operation.reset() # Reset op in case of retry. return server.run_operation( - conn, operation, read_preference, self._event_listeners, unpack_res + conn, + operation, + read_preference, + self._event_listeners, + unpack_res, + self, ) return self._retryable_read( @@ -1397,6 +1434,7 @@ def _cmd( operation.session, address=address, retryable=isinstance(operation, message._Query), + operation=operation.name, ) def _retry_with_session( @@ -1405,6 +1443,8 @@ def _retry_with_session( func: _WriteCall[T], session: Optional[ClientSession], bulk: Optional[_Bulk], + operation: str, + operation_id: Optional[int] = None, ) -> T: """Execute an operation with at most one consecutive retries @@ -1422,7 +1462,9 @@ def _retry_with_session( func=func, session=session, bulk=bulk, + operation=operation, retryable=retryable, + operation_id=operation_id, ) @_csot.apply @@ -1431,34 +1473,37 @@ def _retry_internal( func: _WriteCall[T] | _ReadCall[T], session: Optional[ClientSession], bulk: Optional[_Bulk], + operation: str, is_read: bool = False, address: Optional[_Address] = None, read_pref: Optional[_ServerMode] = None, retryable: bool = False, + operation_id: Optional[int] = None, ) -> T: """Internal retryable helper for all client transactions. 
- :Parameters: - - `func`: Callback function we want to retry - - `session`: Client Session on which the transaction should occur - - `bulk`: Abstraction to handle bulk write operations - - `is_read`: If this is an exclusive read transaction, defaults to False - - `address`: Server Address, defaults to None - - `read_pref`: Topology of read operation, defaults to None - - `retryable`: If the operation should be retried once, defaults to None - - :Returns: - Output of the calling func() + :param func: Callback function we want to retry + :param session: Client Session on which the transaction should occur + :param bulk: Abstraction to handle bulk write operations + :param operation: The name of the operation that the server is being selected for + :param is_read: If this is an exclusive read transaction, defaults to False + :param address: Server Address, defaults to None + :param read_pref: Topology of read operation, defaults to None + :param retryable: If the operation should be retried once, defaults to None + + :return: Output of the calling func() """ return _ClientConnectionRetryable( mongo_client=self, func=func, bulk=bulk, + operation=operation, is_read=is_read, session=session, read_pref=read_pref, address=address, retryable=retryable, + operation_id=operation_id, ).run() def _retryable_read( @@ -1466,8 +1511,10 @@ def _retryable_read( func: _ReadCall[T], read_pref: _ServerMode, session: Optional[ClientSession], + operation: str, address: Optional[_Address] = None, retryable: bool = True, + operation_id: Optional[int] = None, ) -> T: """Execute an operation with consecutive retries if possible @@ -1476,11 +1523,12 @@ def _retryable_read( Re-raises any exception thrown by func(). 
- - `func`: Read call we want to execute - - `read_pref`: Desired topology of read operation - - `session`: Client session we should use to execute operation - - `address`: Optional address when sending a message, defaults to None - - `retryable`: if we should attempt retries + :param func: Read call we want to execute + :param read_pref: Desired topology of read operation + :param session: Client session we should use to execute operation + :param operation: The name of the operation that the server is being selected for + :param address: Optional address when sending a message, defaults to None + :param retryable: if we should attempt retries (may not always be supported even if supplied), defaults to False """ @@ -1493,10 +1541,12 @@ def _retryable_read( func, session, None, + operation, is_read=True, address=address, read_pref=read_pref, retryable=retryable, + operation_id=operation_id, ) def _retryable_write( @@ -1504,7 +1554,9 @@ def _retryable_write( retryable: bool, func: _WriteCall[T], session: Optional[ClientSession], + operation: str, bulk: Optional[_Bulk] = None, + operation_id: Optional[int] = None, ) -> T: """Execute an operation with consecutive retries if possible @@ -1513,14 +1565,14 @@ def _retryable_write( Re-raises any exception thrown by func(). 
- :Parameters: - - `retryable`: if we should attempt retries (may not always be supported) - - `func`: write call we want to execute during a session - - `session`: Client session we will use to execute write operation - - `bulk`: bulk abstraction to execute operations in bulk, defaults to None + :param retryable: if we should attempt retries (may not always be supported) + :param func: write call we want to execute during a session + :param session: Client session we will use to execute write operation + :param operation: The name of the operation that the server is being selected for + :param bulk: bulk abstraction to execute operations in bulk, defaults to None """ with self._tmp_session(session) as s: - return self._retry_with_session(retryable, func, s, bulk) + return self._retry_with_session(retryable, func, s, bulk, operation, operation_id) def __eq__(self, other: Any) -> bool: if isinstance(other, self.__class__): @@ -1575,8 +1627,7 @@ def __getattr__(self, name: str) -> database.Database[_DocumentType]: Raises :class:`~pymongo.errors.InvalidName` if an invalid database name is used. - :Parameters: - - `name`: the name of the database to get + :param name: the name of the database to get """ if name.startswith("_"): raise AttributeError( @@ -1591,8 +1642,7 @@ def __getitem__(self, name: str) -> database.Database[_DocumentType]: Raises :class:`~pymongo.errors.InvalidName` if an invalid database name is used. - :Parameters: - - `name`: the name of the database to get + :param name: the name of the database to get """ return database.Database(self, name) @@ -1611,13 +1661,12 @@ def _cleanup_cursor( pinned connection or implicit session attached at the time the cursor was closed or garbage collected. - :Parameters: - - `locks_allowed`: True if we are allowed to acquire locks. - - `cursor_id`: The cursor id which may be 0. - - `address`: The _CursorAddress. - - `conn_mgr`: The _ConnectionManager for the pinned connection or None. 
- - `session`: The cursor's session. - - `explicit_session`: True if the session was passed explicitly. + :param locks_allowed: True if we are allowed to acquire locks. + :param cursor_id: The cursor id which may be 0. + :param address: The _CursorAddress. + :param conn_mgr: The _ConnectionManager for the pinned connection or None. + :param session: The cursor's session. + :param explicit_session: True if the session was passed explicitly. """ if locks_allowed: if cursor_id: @@ -1636,7 +1685,7 @@ def _cleanup_cursor( if cursor_id or conn_mgr: self._close_cursor_soon(cursor_id, address, conn_mgr) if session and not explicit_session: - session._end_session(lock=locks_allowed) + session.end_session() def _close_cursor_soon( self, @@ -1685,10 +1734,10 @@ def _kill_cursors( if address: # address could be a tuple or _CursorAddress, but # select_server_by_address needs (host, port). - server = topology.select_server_by_address(tuple(address)) # type: ignore[arg-type] + server = topology.select_server_by_address(tuple(address), _Op.KILL_CURSORS) # type: ignore[arg-type] else: # Application called close_cursor() with no address. - server = topology.select_server(writable_server_selector) + server = topology.select_server(writable_server_selector, _Op.KILL_CURSORS) with self._checkout(server, session) as conn: assert address is not None @@ -1703,7 +1752,7 @@ def _kill_cursor_impl( ) -> None: namespace = address.namespace db, coll = namespace.split(".", 1) - spec = SON([("killCursors", coll), ("cursors", cursor_ids)]) + spec = {"killCursors": coll, "cursors": cursor_ids} conn.command(db, spec, session=session, client=self) def _process_kill_cursors(self) -> None: @@ -1761,12 +1810,7 @@ def _process_periodic_tasks(self) -> None: helpers._handle_exception() def __start_session(self, implicit: bool, **kwargs: Any) -> ClientSession: - # Raises ConfigurationError if sessions are not supported. 
- if implicit: - self._topology._check_implicit_session_support() - server_session: Union[_EmptyServerSession, _ServerSession] = _EmptyServerSession() - else: - server_session = self._get_server_session() + server_session = _EmptyServerSession() opts = client_session.SessionOptions(**kwargs) return client_session.ClientSession(self, server_session, opts, implicit) @@ -1788,8 +1832,7 @@ def start_session( or process at a time. A single :class:`ClientSession` cannot be used to run multiple operations concurrently. - :Returns: - An instance of :class:`~pymongo.client_session.ClientSession`. + :return: An instance of :class:`~pymongo.client_session.ClientSession`. .. versionadded:: 3.6 """ @@ -1800,17 +1843,13 @@ def start_session( snapshot=snapshot, ) - def _get_server_session(self) -> _ServerSession: - """Internal: start or resume a _ServerSession.""" - return self._topology.get_server_session() - def _return_server_session( - self, server_session: Union[_ServerSession, _EmptyServerSession], lock: bool + self, server_session: Union[_ServerSession, _EmptyServerSession] ) -> None: """Internal: return a _ServerSession to the pool.""" if isinstance(server_session, _EmptyServerSession): return None - return self._topology.return_server_session(server_session, lock) + return self._topology.return_server_session(server_session) def _ensure_session(self, session: Optional[ClientSession] = None) -> Optional[ClientSession]: """If provided session is None, lend a temporary session.""" @@ -1878,8 +1917,7 @@ def _process_response(self, reply: Mapping[str, Any], session: Optional[ClientSe def server_info(self, session: Optional[client_session.ClientSession] = None) -> dict[str, Any]: """Get information about the MongoDB server we're connected to. - :Parameters: - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. .. 
versionchanged:: 3.6 @@ -1900,29 +1938,27 @@ def list_databases( ) -> CommandCursor[dict[str, Any]]: """Get a cursor over the databases of the connected server. - :Parameters: - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. - - `**kwargs` (optional): Optional parameters of the + :param kwargs: Optional parameters of the `listDatabases command `_ can be passed as keyword arguments to this method. The supported options differ by server version. - :Returns: - An instance of :class:`~pymongo.command_cursor.CommandCursor`. + :return: An instance of :class:`~pymongo.command_cursor.CommandCursor`. .. versionadded:: 3.6 """ - cmd = SON([("listDatabases", 1)]) + cmd = {"listDatabases": 1} cmd.update(kwargs) if comment is not None: cmd["comment"] = comment admin = self._database_default_options("admin") - res = admin._retryable_read_command(cmd, session=session) + res = admin._retryable_read_command(cmd, session=session, operation=_Op.LIST_DATABASES) # listDatabases doesn't return a cursor (yet). Fake one. cursor = { "id": 0, @@ -1938,10 +1974,9 @@ def list_database_names( ) -> list[str]: """Get a list of the names of all databases on the connected server. - :Parameters: - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. .. versionchanged:: 4.1 @@ -1963,13 +1998,12 @@ def drop_database( Raises :class:`TypeError` if `name_or_database` is not an instance of :class:`str` or :class:`~pymongo.database.Database`. 
- :Parameters: - - `name_or_database`: the name of a database to drop, or a + :param name_or_database: the name of a database to drop, or a :class:`~pymongo.database.Database` instance representing the database to drop - - `session` (optional): a + :param session: a :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. .. versionchanged:: 4.1 @@ -1993,7 +2027,7 @@ def drop_database( if not isinstance(name, str): raise TypeError("name_or_database must be an instance of str or a Database") - with self._conn_for_writes(session) as conn: + with self._conn_for_writes(session, operation=_Op.DROP_DATABASE) as conn: self[name]._command( conn, {"dropDatabase": 1, "comment": comment}, @@ -2006,7 +2040,7 @@ def drop_database( def get_default_database( self, default: Optional[str] = None, - codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional[ReadConcern] = None, @@ -2023,26 +2057,25 @@ def get_default_database( Useful in scripts where you want to choose which database to use based only on the URI in a configuration file. - :Parameters: - - `default` (optional): the database name to use if no database name + :param default: the database name to use if no database name was provided in the URI. - - `codec_options` (optional): An instance of + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. If ``None`` (the default) the :attr:`codec_options` of this :class:`MongoClient` is used. - - `read_preference` (optional): The read preference to use. If + :param read_preference: The read preference to use. If ``None`` (the default) the :attr:`read_preference` of this :class:`MongoClient` is used. 
See :mod:`~pymongo.read_preferences` for options. - - `write_concern` (optional): An instance of + :param write_concern: An instance of :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the default) the :attr:`write_concern` of this :class:`MongoClient` is used. - - `read_concern` (optional): An instance of + :param read_concern: An instance of :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the default) the :attr:`read_concern` of this :class:`MongoClient` is used. - - `comment` (optional): A user-provided comment to attach to this + :param comment: A user-provided comment to attach to this command. .. versionchanged:: 4.1 @@ -2067,7 +2100,7 @@ def get_default_database( def get_database( self, name: Optional[str] = None, - codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional[ReadConcern] = None, @@ -2090,23 +2123,22 @@ def get_database( >>> db2.read_preference Secondary(tag_sets=None) - :Parameters: - - `name` (optional): The name of the database - a string. If ``None`` + :param name: The name of the database - a string. If ``None`` (the default) the database named in the MongoDB connection URI is returned. - - `codec_options` (optional): An instance of + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. If ``None`` (the default) the :attr:`codec_options` of this :class:`MongoClient` is used. - - `read_preference` (optional): The read preference to use. If + :param read_preference: The read preference to use. If ``None`` (the default) the :attr:`read_preference` of this :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` for options. - - `write_concern` (optional): An instance of + :param write_concern: An instance of :class:`~pymongo.write_concern.WriteConcern`. 
If ``None`` (the default) the :attr:`write_concern` of this :class:`MongoClient` is used. - - `read_concern` (optional): An instance of + :param read_concern: An instance of :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the default) the :attr:`read_concern` of this :class:`MongoClient` is used. @@ -2160,7 +2192,7 @@ def _retryable_error_doc(exc: PyMongoError) -> Optional[Mapping[str, Any]]: return None -def _add_retryable_write_error(exc: PyMongoError, max_wire_version: int) -> None: +def _add_retryable_write_error(exc: PyMongoError, max_wire_version: int, is_mongos: bool) -> None: doc = _retryable_error_doc(exc) if doc: code = doc.get("code", 0) @@ -2177,7 +2209,10 @@ def _add_retryable_write_error(exc: PyMongoError, max_wire_version: int) -> None for label in doc.get("errorLabels", []): exc._add_error_label(label) else: - if code in helpers._RETRYABLE_ERROR_CODES: + # Do not consult writeConcernError for pre-4.4 mongos. + if isinstance(exc, WriteConcernError) and is_mongos: + pass + elif code in helpers._RETRYABLE_ERROR_CODES: exc._add_error_label("RetryableWriteError") # Connection errors are always retryable except NotPrimaryError and WaitQueueTimeoutError which is @@ -2269,11 +2304,13 @@ def __init__( mongo_client: MongoClient, func: _WriteCall[T] | _ReadCall[T], bulk: Optional[_Bulk], + operation: str, is_read: bool = False, session: Optional[ClientSession] = None, read_pref: Optional[_ServerMode] = None, address: Optional[_Address] = None, retryable: bool = False, + operation_id: Optional[int] = None, ): self._last_error: Optional[Exception] = None self._retrying = False @@ -2291,15 +2328,16 @@ def __init__( ) self._address = address self._server: Server = None # type: ignore + self._deprioritized_servers: list[Server] = [] + self._operation = operation + self._operation_id = operation_id def run(self) -> T: """Runs the supplied func() and attempts a retry - :Raises: - self._last_error: Last exception raised + :raises: self._last_error: Last 
exception raised - :Returns: - Result of the func() call + :return: Result of the func() call """ # Increment the transaction id up front to ensure any retry attempt # will use the proper txnNumber, even if server or socket selection @@ -2329,7 +2367,8 @@ def run(self) -> T: # ConnectionFailures do not supply a code property exc_code = getattr(exc, "code", None) if self._is_not_eligible_for_retry() or ( - exc_code and exc_code not in helpers._RETRYABLE_ERROR_CODES + isinstance(exc, OperationFailure) + and exc_code not in helpers._RETRYABLE_ERROR_CODES ): raise self._retrying = True @@ -2359,6 +2398,9 @@ def run(self) -> T: if self._last_error is None: self._last_error = exc + if self._client.topology_description.topology_type == TOPOLOGY_TYPE.Sharded: + self._deprioritized_servers.append(self._server) + def _is_not_eligible_for_retry(self) -> bool: """Checks if the exchange is not eligible for retry""" return not self._retryable or (self._is_retrying() and not self._multiple_retries) @@ -2381,8 +2423,7 @@ def _check_last_error(self, check_csot: bool = False) -> None: """Checks if the ongoing client exchange experienced a exception previously. 
If so, raise last error - :Parameters: - - `check_csot`: Checks CSOT to ensure we are retrying with time remaining defaults to False + :param check_csot: Checks CSOT to ensure we are retrying with time remaining defaults to False """ if self._is_retrying(): remaining = _csot.remaining() @@ -2393,28 +2434,35 @@ def _check_last_error(self, check_csot: bool = False) -> None: def _get_server(self) -> Server: """Retrieves a server object based on provided object context - :Returns: - Abstraction to connect to server + :return: Abstraction to connect to server """ return self._client._select_server( - self._server_selector, self._session, address=self._address + self._server_selector, + self._session, + self._operation, + address=self._address, + deprioritized_servers=self._deprioritized_servers, + operation_id=self._operation_id, ) def _write(self) -> T: """Wrapper method for write-type retryable client executions - :Returns: - Output for func()'s call + :return: Output for func()'s call """ try: max_wire_version = 0 + is_mongos = False self._server = self._get_server() - supports_session = ( - self._session is not None and self._server.description.retryable_writes_supported - ) with self._client._checkout(self._server, self._session) as conn: max_wire_version = conn.max_wire_version - if self._retryable and not supports_session: + sessions_supported = ( + self._session + and self._server.description.retryable_writes_supported + and conn.supports_sessions + ) + is_mongos = conn.is_mongos + if not sessions_supported: # A retry is not possible because this server does # not support sessions raise the last error. self._check_last_error() @@ -2424,14 +2472,13 @@ def _write(self) -> T: if not self._retryable: raise # Add the RetryableWriteError label, if applicable. 
- _add_retryable_write_error(exc, max_wire_version) + _add_retryable_write_error(exc, max_wire_version, is_mongos) raise def _read(self) -> T: """Wrapper method for read-type retryable client executions - :Returns: - Output for func()'s call + :return: Output for func()'s call """ self._server = self._get_server() assert self._read_pref is not None, "Read Preference required on read calls" @@ -2456,6 +2503,31 @@ def _after_fork_child() -> None: client._after_fork() +def _detect_external_db(entity: str) -> bool: + """Detects external database hosts and logs an informational message at the INFO level.""" + entity = entity.lower() + cosmos_db_hosts = [".cosmos.azure.com"] + document_db_hosts = [".docdb.amazonaws.com", ".docdb-elastic.amazonaws.com"] + + for host in cosmos_db_hosts: + if entity.endswith(host): + _log_or_warn( + _CLIENT_LOGGER, + "You appear to be connected to a CosmosDB cluster. For more information regarding feature " + "compatibility and support please visit https://www.mongodb.com/supportability/cosmosdb", + ) + return True + for host in document_db_hosts: + if entity.endswith(host): + _log_or_warn( + _CLIENT_LOGGER, + "You appear to be connected to a DocumentDB cluster. For more information regarding feature " + "compatibility and support please visit https://www.mongodb.com/supportability/documentdb", + ) + return True + return False + + if _HAS_REGISTER_AT_FORK: # This will run in the same thread as the fork was called. # If we fork in a critical region on the same thread, it should break. 
diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 92b12f7317..64945dd106 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -23,7 +23,7 @@ from pymongo import common, periodic_executor from pymongo._csot import MovingMinimum -from pymongo.errors import NotPrimaryError, OperationFailure, _OperationCancelled +from pymongo.errors import NetworkTimeout, NotPrimaryError, OperationFailure, _OperationCancelled from pymongo.hello import Hello from pymongo.lock import _create_lock from pymongo.periodic_executor import _shutdown_executors @@ -52,6 +52,7 @@ def __init__(self, topology: Topology, name: str, interval: int, min_interval: f The background thread is signaled to stop when the Topology or this instance is freed. """ + # We strongly reference the executor and it weakly references us via # this closure. When the monitor is freed, stop the executor soon. def target() -> bool: @@ -203,7 +204,9 @@ def _run(self) -> None: # Update the Topology and clear the server pool on error. 
self._topology.on_change( - self._server_description, reset_pool=self._server_description.error + self._server_description, + reset_pool=self._server_description.error, + interrupt_connections=isinstance(self._server_description.error, NetworkTimeout), ) if self._stream and ( @@ -332,8 +335,12 @@ def __init__(self, topology: Topology, topology_settings: TopologySettings): self._seedlist = self._settings._seeds assert isinstance(self._settings.fqdn, str) self._fqdn: str = self._settings.fqdn + self._startup_time = time.monotonic() def _run(self) -> None: + # Don't poll right after creation, wait 60 seconds first + if time.monotonic() < self._startup_time + common.MIN_SRV_RESCAN_INTERVAL: + return seedlist = self._get_seedlist() if seedlist: self._seedlist = seedlist diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 03b3c53180..896a747e72 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -191,7 +191,7 @@ def connection_checked_in(self, event): from bson.objectid import ObjectId from pymongo.hello import Hello, HelloCompat -from pymongo.helpers import _handle_exception +from pymongo.helpers import _SENSITIVE_COMMANDS, _handle_exception from pymongo.typings import _Address, _DocumentOut if TYPE_CHECKING: @@ -229,24 +229,21 @@ class CommandListener(_EventListener): def started(self, event: CommandStartedEvent) -> None: """Abstract method to handle a `CommandStartedEvent`. - :Parameters: - - `event`: An instance of :class:`CommandStartedEvent`. + :param event: An instance of :class:`CommandStartedEvent`. """ raise NotImplementedError def succeeded(self, event: CommandSucceededEvent) -> None: """Abstract method to handle a `CommandSucceededEvent`. - :Parameters: - - `event`: An instance of :class:`CommandSucceededEvent`. + :param event: An instance of :class:`CommandSucceededEvent`. """ raise NotImplementedError def failed(self, event: CommandFailedEvent) -> None: """Abstract method to handle a `CommandFailedEvent`. 
- :Parameters: - - `event`: An instance of :class:`CommandFailedEvent`. + :param event: An instance of :class:`CommandFailedEvent`. """ raise NotImplementedError @@ -272,8 +269,7 @@ def pool_created(self, event: PoolCreatedEvent) -> None: Emitted when a connection Pool is created. - :Parameters: - - `event`: An instance of :class:`PoolCreatedEvent`. + :param event: An instance of :class:`PoolCreatedEvent`. """ raise NotImplementedError @@ -282,8 +278,7 @@ def pool_ready(self, event: PoolReadyEvent) -> None: Emitted when a connection Pool is marked ready. - :Parameters: - - `event`: An instance of :class:`PoolReadyEvent`. + :param event: An instance of :class:`PoolReadyEvent`. .. versionadded:: 4.0 """ @@ -294,8 +289,7 @@ def pool_cleared(self, event: PoolClearedEvent) -> None: Emitted when a connection Pool is cleared. - :Parameters: - - `event`: An instance of :class:`PoolClearedEvent`. + :param event: An instance of :class:`PoolClearedEvent`. """ raise NotImplementedError @@ -304,8 +298,7 @@ def pool_closed(self, event: PoolClosedEvent) -> None: Emitted when a connection Pool is closed. - :Parameters: - - `event`: An instance of :class:`PoolClosedEvent`. + :param event: An instance of :class:`PoolClosedEvent`. """ raise NotImplementedError @@ -314,8 +307,7 @@ def connection_created(self, event: ConnectionCreatedEvent) -> None: Emitted when a connection Pool creates a Connection object. - :Parameters: - - `event`: An instance of :class:`ConnectionCreatedEvent`. + :param event: An instance of :class:`ConnectionCreatedEvent`. """ raise NotImplementedError @@ -325,8 +317,7 @@ def connection_ready(self, event: ConnectionReadyEvent) -> None: Emitted when a connection has finished its setup, and is now ready to use. - :Parameters: - - `event`: An instance of :class:`ConnectionReadyEvent`. + :param event: An instance of :class:`ConnectionReadyEvent`. 
""" raise NotImplementedError @@ -335,8 +326,7 @@ def connection_closed(self, event: ConnectionClosedEvent) -> None: Emitted when a connection Pool closes a connection. - :Parameters: - - `event`: An instance of :class:`ConnectionClosedEvent`. + :param event: An instance of :class:`ConnectionClosedEvent`. """ raise NotImplementedError @@ -345,8 +335,7 @@ def connection_check_out_started(self, event: ConnectionCheckOutStartedEvent) -> Emitted when the driver starts attempting to check out a connection. - :Parameters: - - `event`: An instance of :class:`ConnectionCheckOutStartedEvent`. + :param event: An instance of :class:`ConnectionCheckOutStartedEvent`. """ raise NotImplementedError @@ -355,8 +344,7 @@ def connection_check_out_failed(self, event: ConnectionCheckOutFailedEvent) -> N Emitted when the driver's attempt to check out a connection fails. - :Parameters: - - `event`: An instance of :class:`ConnectionCheckOutFailedEvent`. + :param event: An instance of :class:`ConnectionCheckOutFailedEvent`. """ raise NotImplementedError @@ -365,8 +353,7 @@ def connection_checked_out(self, event: ConnectionCheckedOutEvent) -> None: Emitted when the driver successfully checks out a connection. - :Parameters: - - `event`: An instance of :class:`ConnectionCheckedOutEvent`. + :param event: An instance of :class:`ConnectionCheckedOutEvent`. """ raise NotImplementedError @@ -376,8 +363,7 @@ def connection_checked_in(self, event: ConnectionCheckedInEvent) -> None: Emitted when the driver checks in a connection back to the connection Pool. - :Parameters: - - `event`: An instance of :class:`ConnectionCheckedInEvent`. + :param event: An instance of :class:`ConnectionCheckedInEvent`. """ raise NotImplementedError @@ -394,24 +380,21 @@ class ServerHeartbeatListener(_EventListener): def started(self, event: ServerHeartbeatStartedEvent) -> None: """Abstract method to handle a `ServerHeartbeatStartedEvent`. - :Parameters: - - `event`: An instance of :class:`ServerHeartbeatStartedEvent`. 
+ :param event: An instance of :class:`ServerHeartbeatStartedEvent`. """ raise NotImplementedError def succeeded(self, event: ServerHeartbeatSucceededEvent) -> None: """Abstract method to handle a `ServerHeartbeatSucceededEvent`. - :Parameters: - - `event`: An instance of :class:`ServerHeartbeatSucceededEvent`. + :param event: An instance of :class:`ServerHeartbeatSucceededEvent`. """ raise NotImplementedError def failed(self, event: ServerHeartbeatFailedEvent) -> None: """Abstract method to handle a `ServerHeartbeatFailedEvent`. - :Parameters: - - `event`: An instance of :class:`ServerHeartbeatFailedEvent`. + :param event: An instance of :class:`ServerHeartbeatFailedEvent`. """ raise NotImplementedError @@ -427,24 +410,21 @@ class TopologyListener(_EventListener): def opened(self, event: TopologyOpenedEvent) -> None: """Abstract method to handle a `TopologyOpenedEvent`. - :Parameters: - - `event`: An instance of :class:`TopologyOpenedEvent`. + :param event: An instance of :class:`TopologyOpenedEvent`. """ raise NotImplementedError def description_changed(self, event: TopologyDescriptionChangedEvent) -> None: """Abstract method to handle a `TopologyDescriptionChangedEvent`. - :Parameters: - - `event`: An instance of :class:`TopologyDescriptionChangedEvent`. + :param event: An instance of :class:`TopologyDescriptionChangedEvent`. """ raise NotImplementedError def closed(self, event: TopologyClosedEvent) -> None: """Abstract method to handle a `TopologyClosedEvent`. - :Parameters: - - `event`: An instance of :class:`TopologyClosedEvent`. + :param event: An instance of :class:`TopologyClosedEvent`. """ raise NotImplementedError @@ -460,24 +440,21 @@ class ServerListener(_EventListener): def opened(self, event: ServerOpeningEvent) -> None: """Abstract method to handle a `ServerOpeningEvent`. - :Parameters: - - `event`: An instance of :class:`ServerOpeningEvent`. + :param event: An instance of :class:`ServerOpeningEvent`. 
""" raise NotImplementedError def description_changed(self, event: ServerDescriptionChangedEvent) -> None: """Abstract method to handle a `ServerDescriptionChangedEvent`. - :Parameters: - - `event`: An instance of :class:`ServerDescriptionChangedEvent`. + :param event: An instance of :class:`ServerDescriptionChangedEvent`. """ raise NotImplementedError def closed(self, event: ServerClosedEvent) -> None: """Abstract method to handle a `ServerClosedEvent`. - :Parameters: - - `event`: An instance of :class:`ServerClosedEvent`. + :param event: An instance of :class:`ServerClosedEvent`. """ raise NotImplementedError @@ -507,8 +484,7 @@ def _validate_event_listeners( def register(listener: _EventListener) -> None: """Register a global event listener. - :Parameters: - - `listener`: A subclasses of :class:`CommandListener`, + :param listener: A subclasses of :class:`CommandListener`, :class:`ServerHeartbeatListener`, :class:`ServerListener`, :class:`TopologyListener`, or :class:`ConnectionPoolListener`. """ @@ -531,22 +507,6 @@ def register(listener: _EventListener) -> None: _LISTENERS.cmap_listeners.append(listener) -# Note - to avoid bugs from forgetting which if these is all lowercase and -# which are camelCase, and at the same time avoid having to add a test for -# every command, use all lowercase here and test against command_name.lower(). -_SENSITIVE_COMMANDS: set = { - "authenticate", - "saslstart", - "saslcontinue", - "getnonce", - "createuser", - "updateuser", - "copydbgetnonce", - "copydbsaslstart", - "copydb", -} - - # The "hello" command is also deemed sensitive when attempting speculative # authentication. 
def _is_speculative_authenticate(command_name: str, doc: Mapping[str, Any]) -> bool: @@ -561,7 +521,15 @@ def _is_speculative_authenticate(command_name: str, doc: Mapping[str, Any]) -> b class _CommandEvent: """Base class for command events.""" - __slots__ = ("__cmd_name", "__rqst_id", "__conn_id", "__op_id", "__service_id", "__db") + __slots__ = ( + "__cmd_name", + "__rqst_id", + "__conn_id", + "__op_id", + "__service_id", + "__db", + "__server_conn_id", + ) def __init__( self, @@ -571,6 +539,7 @@ def __init__( operation_id: Optional[int], service_id: Optional[ObjectId] = None, database_name: str = "", + server_connection_id: Optional[int] = None, ) -> None: self.__cmd_name = command_name self.__rqst_id = request_id @@ -578,6 +547,7 @@ def __init__( self.__op_id = operation_id self.__service_id = service_id self.__db = database_name + self.__server_conn_id = server_connection_id @property def command_name(self) -> str: @@ -615,18 +585,25 @@ def database_name(self) -> str: """ return self.__db + @property + def server_connection_id(self) -> Optional[int]: + """The server-side connection id for the connection this command was sent on, or ``None``. + + .. versionadded:: 4.7 + """ + return self.__server_conn_id + class CommandStartedEvent(_CommandEvent): """Event published when a command starts. - :Parameters: - - `command`: The command document. - - `database_name`: The name of the database this command was run against. - - `request_id`: The request id for this operation. - - `connection_id`: The address (host, port) of the server this command + :param command: The command document. + :param database_name: The name of the database this command was run against. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this command was sent to. - - `operation_id`: An optional identifier for a series of related events. - - `service_id`: The service_id this command was sent to, or ``None``. 
+ :param operation_id: An optional identifier for a series of related events. + :param service_id: The service_id this command was sent to, or ``None``. """ __slots__ = ("__cmd",) @@ -639,6 +616,7 @@ def __init__( connection_id: _Address, operation_id: Optional[int], service_id: Optional[ObjectId] = None, + server_connection_id: Optional[int] = None, ) -> None: if not command: raise ValueError(f"{command!r} is not a valid command") @@ -651,6 +629,7 @@ def __init__( operation_id, service_id=service_id, database_name=database_name, + server_connection_id=server_connection_id, ) cmd_name = command_name.lower() if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, command): @@ -669,29 +648,31 @@ def database_name(self) -> str: return super().database_name def __repr__(self) -> str: - return ("<{} {} db: {!r}, command: {!r}, operation_id: {}, service_id: {}>").format( + return ( + "<{} {} db: {!r}, command: {!r}, operation_id: {}, service_id: {}, server_connection_id: {}>" + ).format( self.__class__.__name__, self.connection_id, self.database_name, self.command_name, self.operation_id, self.service_id, + self.server_connection_id, ) class CommandSucceededEvent(_CommandEvent): """Event published when a command succeeds. - :Parameters: - - `duration`: The command duration as a datetime.timedelta. - - `reply`: The server reply document. - - `command_name`: The command name. - - `request_id`: The request id for this operation. - - `connection_id`: The address (host, port) of the server this command + :param duration: The command duration as a datetime.timedelta. + :param reply: The server reply document. + :param command_name: The command name. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this command was sent to. - - `operation_id`: An optional identifier for a series of related events. - - `service_id`: The service_id this command was sent to, or ``None``. 
- - `database_name`: The database this command was sent to, or ``""``. + :param operation_id: An optional identifier for a series of related events. + :param service_id: The service_id this command was sent to, or ``None``. + :param database_name: The database this command was sent to, or ``""``. """ __slots__ = ("__duration_micros", "__reply") @@ -706,6 +687,7 @@ def __init__( operation_id: Optional[int], service_id: Optional[ObjectId] = None, database_name: str = "", + server_connection_id: Optional[int] = None, ) -> None: super().__init__( command_name, @@ -714,6 +696,7 @@ def __init__( operation_id, service_id=service_id, database_name=database_name, + server_connection_id=server_connection_id, ) self.__duration_micros = _to_micros(duration) cmd_name = command_name.lower() @@ -734,7 +717,7 @@ def reply(self) -> _DocumentOut: def __repr__(self) -> str: return ( - "<{} {} db: {!r}, command: {!r}, operation_id: {}, duration_micros: {}, service_id: {}>" + "<{} {} db: {!r}, command: {!r}, operation_id: {}, duration_micros: {}, service_id: {}, server_connection_id: {}>" ).format( self.__class__.__name__, self.connection_id, @@ -743,22 +726,22 @@ def __repr__(self) -> str: self.operation_id, self.duration_micros, self.service_id, + self.server_connection_id, ) class CommandFailedEvent(_CommandEvent): """Event published when a command fails. - :Parameters: - - `duration`: The command duration as a datetime.timedelta. - - `failure`: The server reply document. - - `command_name`: The command name. - - `request_id`: The request id for this operation. - - `connection_id`: The address (host, port) of the server this command + :param duration: The command duration as a datetime.timedelta. + :param failure: The server reply document. + :param command_name: The command name. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this command was sent to. 
- - `operation_id`: An optional identifier for a series of related events. - - `service_id`: The service_id this command was sent to, or ``None``. - - `database_name`: The database this command was sent to, or ``""``. + :param operation_id: An optional identifier for a series of related events. + :param service_id: The service_id this command was sent to, or ``None``. + :param database_name: The database this command was sent to, or ``""``. """ __slots__ = ("__duration_micros", "__failure") @@ -773,6 +756,7 @@ def __init__( operation_id: Optional[int], service_id: Optional[ObjectId] = None, database_name: str = "", + server_connection_id: Optional[int] = None, ) -> None: super().__init__( command_name, @@ -781,6 +765,7 @@ def __init__( operation_id, service_id=service_id, database_name=database_name, + server_connection_id=server_connection_id, ) self.__duration_micros = _to_micros(duration) self.__failure = failure @@ -798,7 +783,7 @@ def failure(self) -> _DocumentOut: def __repr__(self) -> str: return ( "<{} {} db: {!r}, command: {!r}, operation_id: {}, duration_micros: {}, " - "failure: {!r}, service_id: {}>" + "failure: {!r}, service_id: {}, server_connection_id: {}>" ).format( self.__class__.__name__, self.connection_id, @@ -808,6 +793,7 @@ def __repr__(self) -> str: self.duration_micros, self.failure, self.service_id, + self.server_connection_id, ) @@ -833,8 +819,7 @@ def __repr__(self) -> str: class PoolCreatedEvent(_PoolEvent): """Published when a Connection Pool is created. - :Parameters: - - `address`: The address (host, port) pair of the server this Pool is + :param address: The address (host, port) pair of the server this Pool is attempting to connect to. .. versionadded:: 3.9 @@ -858,8 +843,7 @@ def __repr__(self) -> str: class PoolReadyEvent(_PoolEvent): """Published when a Connection Pool is marked ready. 
- :Parameters: - - `address`: The address (host, port) pair of the server this Pool is + :param address: The address (host, port) pair of the server this Pool is attempting to connect to. .. versionadded:: 4.0 @@ -871,19 +855,25 @@ class PoolReadyEvent(_PoolEvent): class PoolClearedEvent(_PoolEvent): """Published when a Connection Pool is cleared. - :Parameters: - - `address`: The address (host, port) pair of the server this Pool is + :param address: The address (host, port) pair of the server this Pool is attempting to connect to. - - `service_id`: The service_id this command was sent to, or ``None``. + :param service_id: The service_id this command was sent to, or ``None``. + :param interrupt_connections: True if all active connections were interrupted by the Pool during clearing. .. versionadded:: 3.9 """ - __slots__ = ("__service_id",) + __slots__ = ("__service_id", "__interrupt_connections") - def __init__(self, address: _Address, service_id: Optional[ObjectId] = None) -> None: + def __init__( + self, + address: _Address, + service_id: Optional[ObjectId] = None, + interrupt_connections: bool = False, + ) -> None: super().__init__(address) self.__service_id = service_id + self.__interrupt_connections = interrupt_connections @property def service_id(self) -> Optional[ObjectId]: @@ -895,15 +885,22 @@ def service_id(self) -> Optional[ObjectId]: """ return self.__service_id + @property + def interrupt_connections(self) -> bool: + """If True, active connections are interrupted during clearing. + + .. versionadded:: 4.7 + """ + return self.__interrupt_connections + def __repr__(self) -> str: - return f"{self.__class__.__name__}({self.address!r}, {self.__service_id!r})" + return f"{self.__class__.__name__}({self.address!r}, {self.__service_id!r}, {self.__interrupt_connections!r})" class PoolClosedEvent(_PoolEvent): """Published when a Connection Pool is closed. 
- :Parameters: - - `address`: The address (host, port) pair of the server this Pool is + :param address: The address (host, port) pair of the server this Pool is attempting to connect to. .. versionadded:: 3.9 @@ -989,16 +986,36 @@ def __repr__(self) -> str: return f"{self.__class__.__name__}({self.address!r}, {self.__connection_id!r})" +class _ConnectionDurationEvent(_ConnectionIdEvent): + """Private base class for connection events with a duration.""" + + __slots__ = ("__duration",) + + def __init__(self, address: _Address, connection_id: int, duration: Optional[float]) -> None: + super().__init__(address, connection_id) + self.__duration = duration + + @property + def duration(self) -> Optional[float]: + """The duration of the connection event. + + .. versionadded:: 4.7 + """ + return self.__duration + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.connection_id!r}, {self.__duration!r})" + + class ConnectionCreatedEvent(_ConnectionIdEvent): """Published when a Connection Pool creates a Connection object. NOTE: This connection is not ready for use until the :class:`ConnectionReadyEvent` is published. - :Parameters: - - `address`: The address (host, port) pair of the server this + :param address: The address (host, port) pair of the server this Connection is attempting to connect to. - - `connection_id`: The integer ID of the Connection in this Pool. + :param connection_id: The integer ID of the Connection in this Pool. .. versionadded:: 3.9 """ @@ -1006,13 +1023,12 @@ class ConnectionCreatedEvent(_ConnectionIdEvent): __slots__ = () -class ConnectionReadyEvent(_ConnectionIdEvent): +class ConnectionReadyEvent(_ConnectionDurationEvent): """Published when a Connection has finished its setup, and is ready to use. - :Parameters: - - `address`: The address (host, port) pair of the server this + :param address: The address (host, port) pair of the server this Connection is attempting to connect to. 
- - `connection_id`: The integer ID of the Connection in this Pool. + :param connection_id: The integer ID of the Connection in this Pool. .. versionadded:: 3.9 """ @@ -1023,11 +1039,10 @@ class ConnectionReadyEvent(_ConnectionIdEvent): class ConnectionClosedEvent(_ConnectionIdEvent): """Published when a Connection is closed. - :Parameters: - - `address`: The address (host, port) pair of the server this + :param address: The address (host, port) pair of the server this Connection is attempting to connect to. - - `connection_id`: The integer ID of the Connection in this Pool. - - `reason`: A reason explaining why this connection was closed. + :param connection_id: The integer ID of the Connection in this Pool. + :param reason: A reason explaining why this connection was closed. .. versionadded:: 3.9 """ @@ -1059,8 +1074,7 @@ def __repr__(self) -> str: class ConnectionCheckOutStartedEvent(_ConnectionEvent): """Published when the driver starts attempting to check out a connection. - :Parameters: - - `address`: The address (host, port) pair of the server this + :param address: The address (host, port) pair of the server this Connection is attempting to connect to. .. versionadded:: 3.9 @@ -1069,21 +1083,20 @@ class ConnectionCheckOutStartedEvent(_ConnectionEvent): __slots__ = () -class ConnectionCheckOutFailedEvent(_ConnectionEvent): +class ConnectionCheckOutFailedEvent(_ConnectionDurationEvent): """Published when the driver's attempt to check out a connection fails. - :Parameters: - - `address`: The address (host, port) pair of the server this + :param address: The address (host, port) pair of the server this Connection is attempting to connect to. - - `reason`: A reason explaining why connection check out failed. + :param reason: A reason explaining why connection check out failed. .. 
versionadded:: 3.9 """ __slots__ = ("__reason",) - def __init__(self, address: _Address, reason: str) -> None: - super().__init__(address) + def __init__(self, address: _Address, reason: str, duration: Optional[float]) -> None: + super().__init__(address=address, connection_id=0, duration=duration) self.__reason = reason @property @@ -1096,16 +1109,15 @@ def reason(self) -> str: return self.__reason def __repr__(self) -> str: - return f"{self.__class__.__name__}({self.address!r}, {self.__reason!r})" + return f"{self.__class__.__name__}({self.address!r}, {self.__reason!r}, {self.duration!r})" -class ConnectionCheckedOutEvent(_ConnectionIdEvent): +class ConnectionCheckedOutEvent(_ConnectionDurationEvent): """Published when the driver successfully checks out a connection. - :Parameters: - - `address`: The address (host, port) pair of the server this + :param address: The address (host, port) pair of the server this Connection is attempting to connect to. - - `connection_id`: The integer ID of the Connection in this Pool. + :param connection_id: The integer ID of the Connection in this Pool. .. versionadded:: 3.9 """ @@ -1116,10 +1128,9 @@ class ConnectionCheckedOutEvent(_ConnectionIdEvent): class ConnectionCheckedInEvent(_ConnectionIdEvent): """Published when the driver checks in a Connection into the Pool. - :Parameters: - - `address`: The address (host, port) pair of the server this + :param address: The address (host, port) pair of the server this Connection is attempting to connect to. - - `connection_id`: The integer ID of the Connection in this Pool. + :param connection_id: The integer ID of the Connection in this Pool. .. versionadded:: 3.9 """ @@ -1422,8 +1433,7 @@ class _EventListeners: Any event listeners registered globally are included by default. - :Parameters: - - `listeners`: A list of event listeners. + :param listeners: A list of event listeners. 
""" def __init__(self, listeners: Optional[Sequence[_EventListener]]): @@ -1492,25 +1502,31 @@ def publish_command_start( database_name: str, request_id: int, connection_id: _Address, + server_connection_id: Optional[int], op_id: Optional[int] = None, service_id: Optional[ObjectId] = None, ) -> None: """Publish a CommandStartedEvent to all command listeners. - :Parameters: - - `command`: The command document. - - `database_name`: The name of the database this command was run + :param command: The command document. + :param database_name: The name of the database this command was run against. - - `request_id`: The request id for this operation. - - `connection_id`: The address (host, port) of the server this + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this command was sent to. - - `op_id`: The (optional) operation id for this operation. - - `service_id`: The service_id this command was sent to, or ``None``. + :param op_id: The (optional) operation id for this operation. + :param service_id: The service_id this command was sent to, or ``None``. """ if op_id is None: op_id = request_id event = CommandStartedEvent( - command, database_name, request_id, connection_id, op_id, service_id=service_id + command, + database_name, + request_id, + connection_id, + op_id, + service_id=service_id, + server_connection_id=server_connection_id, ) for subscriber in self.__command_listeners: try: @@ -1525,6 +1541,7 @@ def publish_command_success( command_name: str, request_id: int, connection_id: _Address, + server_connection_id: Optional[int], op_id: Optional[int] = None, service_id: Optional[ObjectId] = None, speculative_hello: bool = False, @@ -1532,17 +1549,16 @@ def publish_command_success( ) -> None: """Publish a CommandSucceededEvent to all command listeners. - :Parameters: - - `duration`: The command duration as a datetime.timedelta. - - `reply`: The server reply document. 
- - `command_name`: The command name. - - `request_id`: The request id for this operation. - - `connection_id`: The address (host, port) of the server this + :param duration: The command duration as a datetime.timedelta. + :param reply: The server reply document. + :param command_name: The command name. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this command was sent to. - - `op_id`: The (optional) operation id for this operation. - - `service_id`: The service_id this command was sent to, or ``None``. - - `speculative_hello`: Was the command sent with speculative auth? - - `database_name`: The database this command was sent to, or ``""``. + :param op_id: The (optional) operation id for this operation. + :param service_id: The service_id this command was sent to, or ``None``. + :param speculative_hello: Was the command sent with speculative auth? + :param database_name: The database this command was sent to, or ``""``. """ if op_id is None: op_id = request_id @@ -1559,6 +1575,7 @@ def publish_command_success( op_id, service_id, database_name=database_name, + server_connection_id=server_connection_id, ) for subscriber in self.__command_listeners: try: @@ -1573,23 +1590,23 @@ def publish_command_failure( command_name: str, request_id: int, connection_id: _Address, + server_connection_id: Optional[int], op_id: Optional[int] = None, service_id: Optional[ObjectId] = None, database_name: str = "", ) -> None: """Publish a CommandFailedEvent to all command listeners. - :Parameters: - - `duration`: The command duration as a datetime.timedelta. - - `failure`: The server reply document or failure description + :param duration: The command duration as a datetime.timedelta. + :param failure: The server reply document or failure description document. - - `command_name`: The command name. - - `request_id`: The request id for this operation. 
- - `connection_id`: The address (host, port) of the server this + :param command_name: The command name. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this command was sent to. - - `op_id`: The (optional) operation id for this operation. - - `service_id`: The service_id this command was sent to, or ``None``. - - `database_name`: The database this command was sent to, or ``""``. + :param op_id: The (optional) operation id for this operation. + :param service_id: The service_id this command was sent to, or ``None``. + :param database_name: The database this command was sent to, or ``""``. """ if op_id is None: op_id = request_id @@ -1602,6 +1619,7 @@ def publish_command_failure( op_id, service_id=service_id, database_name=database_name, + server_connection_id=server_connection_id, ) for subscriber in self.__command_listeners: try: @@ -1613,9 +1631,8 @@ def publish_server_heartbeat_started(self, connection_id: _Address, awaited: boo """Publish a ServerHeartbeatStartedEvent to all server heartbeat listeners. - :Parameters: - - `connection_id`: The address (host, port) pair of the connection. - - `awaited`: True if this heartbeat is part of an awaitable hello command. + :param connection_id: The address (host, port) pair of the connection. + :param awaited: True if this heartbeat is part of an awaitable hello command. """ event = ServerHeartbeatStartedEvent(connection_id, awaited) for subscriber in self.__server_heartbeat_listeners: @@ -1630,12 +1647,11 @@ def publish_server_heartbeat_succeeded( """Publish a ServerHeartbeatSucceededEvent to all server heartbeat listeners. - :Parameters: - - `connection_id`: The address (host, port) pair of the connection. - - `duration`: The execution time of the event in the highest possible + :param connection_id: The address (host, port) pair of the connection. 
+ :param duration: The execution time of the event in the highest possible resolution for the platform. - - `reply`: The command reply. - - `awaited`: True if the response was awaited. + :param reply: The command reply. + :param awaited: True if the response was awaited. """ event = ServerHeartbeatSucceededEvent(duration, reply, connection_id, awaited) for subscriber in self.__server_heartbeat_listeners: @@ -1650,12 +1666,11 @@ def publish_server_heartbeat_failed( """Publish a ServerHeartbeatFailedEvent to all server heartbeat listeners. - :Parameters: - - `connection_id`: The address (host, port) pair of the connection. - - `duration`: The execution time of the event in the highest possible + :param connection_id: The address (host, port) pair of the connection. + :param duration: The execution time of the event in the highest possible resolution for the platform. - - `reply`: The command reply. - - `awaited`: True if the response was awaited. + :param reply: The command reply. + :param awaited: True if the response was awaited. """ event = ServerHeartbeatFailedEvent(duration, reply, connection_id, awaited) for subscriber in self.__server_heartbeat_listeners: @@ -1667,9 +1682,8 @@ def publish_server_heartbeat_failed( def publish_server_opened(self, server_address: _Address, topology_id: ObjectId) -> None: """Publish a ServerOpeningEvent to all server listeners. - :Parameters: - - `server_address`: The address (host, port) pair of the server. - - `topology_id`: A unique identifier for the topology this server + :param server_address: The address (host, port) pair of the server. + :param topology_id: A unique identifier for the topology this server is a part of. """ event = ServerOpeningEvent(server_address, topology_id) @@ -1682,9 +1696,8 @@ def publish_server_opened(self, server_address: _Address, topology_id: ObjectId) def publish_server_closed(self, server_address: _Address, topology_id: ObjectId) -> None: """Publish a ServerClosedEvent to all server listeners. 
- :Parameters: - - `server_address`: The address (host, port) pair of the server. - - `topology_id`: A unique identifier for the topology this server + :param server_address: The address (host, port) pair of the server. + :param topology_id: A unique identifier for the topology this server is a part of. """ event = ServerClosedEvent(server_address, topology_id) @@ -1703,11 +1716,10 @@ def publish_server_description_changed( ) -> None: """Publish a ServerDescriptionChangedEvent to all server listeners. - :Parameters: - - `previous_description`: The previous server description. - - `server_address`: The address (host, port) pair of the server. - - `new_description`: The new server description. - - `topology_id`: A unique identifier for the topology this server + :param previous_description: The previous server description. + :param server_address: The address (host, port) pair of the server. + :param new_description: The new server description. + :param topology_id: A unique identifier for the topology this server is a part of. """ event = ServerDescriptionChangedEvent( @@ -1722,8 +1734,7 @@ def publish_server_description_changed( def publish_topology_opened(self, topology_id: ObjectId) -> None: """Publish a TopologyOpenedEvent to all topology listeners. - :Parameters: - - `topology_id`: A unique identifier for the topology this server + :param topology_id: A unique identifier for the topology this server is a part of. """ event = TopologyOpenedEvent(topology_id) @@ -1736,8 +1747,7 @@ def publish_topology_opened(self, topology_id: ObjectId) -> None: def publish_topology_closed(self, topology_id: ObjectId) -> None: """Publish a TopologyClosedEvent to all topology listeners. - :Parameters: - - `topology_id`: A unique identifier for the topology this server + :param topology_id: A unique identifier for the topology this server is a part of. 
""" event = TopologyClosedEvent(topology_id) @@ -1755,10 +1765,9 @@ def publish_topology_description_changed( ) -> None: """Publish a TopologyDescriptionChangedEvent to all topology listeners. - :Parameters: - - `previous_description`: The previous topology description. - - `new_description`: The new topology description. - - `topology_id`: A unique identifier for the topology this server + :param previous_description: The previous topology description. + :param new_description: The new topology description. + :param topology_id: A unique identifier for the topology this server is a part of. """ event = TopologyDescriptionChangedEvent(previous_description, new_description, topology_id) @@ -1786,9 +1795,14 @@ def publish_pool_ready(self, address: _Address) -> None: except Exception: _handle_exception() - def publish_pool_cleared(self, address: _Address, service_id: Optional[ObjectId]) -> None: + def publish_pool_cleared( + self, + address: _Address, + service_id: Optional[ObjectId], + interrupt_connections: bool = False, + ) -> None: """Publish a :class:`PoolClearedEvent` to all pool listeners.""" - event = PoolClearedEvent(address, service_id) + event = PoolClearedEvent(address, service_id, interrupt_connections) for subscriber in self.__cmap_listeners: try: subscriber.pool_cleared(event) @@ -1815,9 +1829,11 @@ def publish_connection_created(self, address: _Address, connection_id: int) -> N except Exception: _handle_exception() - def publish_connection_ready(self, address: _Address, connection_id: int) -> None: + def publish_connection_ready( + self, address: _Address, connection_id: int, duration: float + ) -> None: """Publish a :class:`ConnectionReadyEvent` to all connection listeners.""" - event = ConnectionReadyEvent(address, connection_id) + event = ConnectionReadyEvent(address, connection_id, duration) for subscriber in self.__cmap_listeners: try: subscriber.connection_ready(event) @@ -1846,22 +1862,26 @@ def publish_connection_check_out_started(self, 
address: _Address) -> None: except Exception: _handle_exception() - def publish_connection_check_out_failed(self, address: _Address, reason: str) -> None: + def publish_connection_check_out_failed( + self, address: _Address, reason: str, duration: float + ) -> None: """Publish a :class:`ConnectionCheckOutFailedEvent` to all connection listeners. """ - event = ConnectionCheckOutFailedEvent(address, reason) + event = ConnectionCheckOutFailedEvent(address, reason, duration) for subscriber in self.__cmap_listeners: try: subscriber.connection_check_out_failed(event) except Exception: _handle_exception() - def publish_connection_checked_out(self, address: _Address, connection_id: int) -> None: + def publish_connection_checked_out( + self, address: _Address, connection_id: int, duration: float + ) -> None: """Publish a :class:`ConnectionCheckedOutEvent` to all connection listeners. """ - event = ConnectionCheckedOutEvent(address, connection_id) + event = ConnectionCheckedOutEvent(address, connection_id, duration) for subscriber in self.__cmap_listeners: try: subscriber.connection_checked_out(event) diff --git a/pymongo/network.py b/pymongo/network.py index fb4388121e..76afbe135d 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -17,6 +17,7 @@ import datetime import errno +import logging import socket import struct import time @@ -41,6 +42,7 @@ ProtocolError, _OperationCancelled, ) +from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log from pymongo.message import _UNPACK_REPLY, _OpMsg, _OpReply from pymongo.monitoring import _is_speculative_authenticate from pymongo.socket_checker import _errno_from_exception @@ -86,31 +88,30 @@ def command( ) -> _DocumentType: """Execute a command over the socket, or raise socket.error. - :Parameters: - - `conn`: a Connection instance - - `dbname`: name of the database on which to run the command - - `spec`: a command document as an ordered dict type, eg SON. - - `is_mongos`: are we connected to a mongos? 
- - `read_preference`: a read preference - - `codec_options`: a CodecOptions instance - - `session`: optional ClientSession instance. - - `client`: optional MongoClient instance for updating $clusterTime. - - `check`: raise OperationFailure if there are errors - - `allowable_errors`: errors to ignore if `check` is True - - `address`: the (host, port) of `conn` - - `listeners`: An instance of :class:`~pymongo.monitoring.EventListeners` - - `max_bson_size`: The maximum encoded bson size for this server - - `read_concern`: The read concern for this command. - - `parse_write_concern_error`: Whether to parse the ``writeConcernError`` + :param conn: a Connection instance + :param dbname: name of the database on which to run the command + :param spec: a command document as an ordered dict type, eg SON. + :param is_mongos: are we connected to a mongos? + :param read_preference: a read preference + :param codec_options: a CodecOptions instance + :param session: optional ClientSession instance. + :param client: optional MongoClient instance for updating $clusterTime. + :param check: raise OperationFailure if there are errors + :param allowable_errors: errors to ignore if `check` is True + :param address: the (host, port) of `conn` + :param listeners: An instance of :class:`~pymongo.monitoring.EventListeners` + :param max_bson_size: The maximum encoded bson size for this server + :param read_concern: The read concern for this command. + :param parse_write_concern_error: Whether to parse the ``writeConcernError`` field in the command response. - - `collation`: The collation for this command. - - `compression_ctx`: optional compression Context. - - `use_op_msg`: True if we should use OP_MSG. - - `unacknowledged`: True if this is an unacknowledged command. - - `user_fields` (optional): Response fields that should be decoded + :param collation: The collation for this command. + :param compression_ctx: optional compression Context. 
+ :param use_op_msg: True if we should use OP_MSG. + :param unacknowledged: True if this is an unacknowledged command. + :param user_fields: Response fields that should be decoded using the TypeDecoders from codec_options, passed to bson._decode_all_selective. - - `exhaust_allowed`: True if we should enable OP_MSG exhaustAllowed. + :param exhaust_allowed: True if we should enable OP_MSG exhaustAllowed. """ name = next(iter(spec)) ns = dbname + ".$cmd" @@ -130,8 +131,8 @@ def command( spec["collation"] = collation publish = listeners is not None and listeners.enabled_for_commands + start = datetime.datetime.now() if publish: - start = datetime.datetime.now() speculative_hello = _is_speculative_authenticate(name, spec) if compression_ctx and name.lower() in _NO_COMPRESSION: @@ -162,15 +163,34 @@ def command( if max_bson_size is not None and size > max_bson_size + message._COMMAND_OVERHEAD: message._raise_document_too_large(name, size, max_bson_size + message._COMMAND_OVERHEAD) - + if client is not None: + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.STARTED, + command=spec, + commandName=next(iter(spec)), + databaseName=dbname, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + ) if publish: - encoding_duration = datetime.datetime.now() - start assert listeners is not None assert address is not None listeners.publish_command_start( - orig, dbname, request_id, address, service_id=conn.service_id + orig, + dbname, + request_id, + address, + conn.server_connection_id, + service_id=conn.service_id, ) - start = datetime.datetime.now() try: conn.conn.sendall(msg) @@ -196,12 +216,31 @@ def command( parse_write_concern_error=parse_write_concern_error, ) except Exception as exc: + duration = 
datetime.datetime.now() - start + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = message._convert_exception(exc) + if client is not None: + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.FAILED, + durationMS=duration, + failure=failure, + commandName=next(iter(spec)), + databaseName=dbname, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) if publish: - duration = (datetime.datetime.now() - start) + encoding_duration - if isinstance(exc, (NotPrimaryError, OperationFailure)): - failure: _DocumentOut = exc.details # type: ignore[assignment] - else: - failure = message._convert_exception(exc) assert listeners is not None assert address is not None listeners.publish_command_failure( @@ -210,12 +249,32 @@ def command( name, request_id, address, + conn.server_connection_id, service_id=conn.service_id, database_name=dbname, ) raise + duration = datetime.datetime.now() - start + if client is not None: + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.SUCCEEDED, + durationMS=duration, + reply=response_doc, + commandName=next(iter(spec)), + databaseName=dbname, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + speculative_authenticate="speculativeAuthenticate" in orig, + ) if publish: - duration = (datetime.datetime.now() - start) + encoding_duration assert 
listeners is not None assert address is not None listeners.publish_command_success( @@ -224,6 +283,7 @@ def command( name, request_id, address, + conn.server_connection_id, service_id=conn.service_id, speculative_hello=speculative_hello, database_name=dbname, @@ -290,35 +350,35 @@ def receive_message( def wait_for_read(conn: Connection, deadline: Optional[float]) -> None: """Block until at least one byte is read, or a timeout, or a cancel.""" - context = conn.cancel_context - # Only Monitor connections can be cancelled. - if context: - sock = conn.conn - timed_out = False - while True: - # SSLSocket can have buffered data which won't be caught by select. - if hasattr(sock, "pending") and sock.pending() > 0: - readable = True + sock = conn.conn + timed_out = False + # Check if the connection's socket has been manually closed + if sock.fileno() == -1: + return + while True: + # SSLSocket can have buffered data which won't be caught by select. + if hasattr(sock, "pending") and sock.pending() > 0: + readable = True + else: + # Wait up to 500ms for the socket to become readable and then + # check for cancellation. + if deadline: + remaining = deadline - time.monotonic() + # When the timeout has expired perform one final check to + # see if the socket is readable. This helps avoid spurious + # timeouts on AWS Lambda and other FaaS environments. + if remaining <= 0: + timed_out = True + timeout = max(min(remaining, _POLL_TIMEOUT), 0) else: - # Wait up to 500ms for the socket to become readable and then - # check for cancellation. - if deadline: - remaining = deadline - time.monotonic() - # When the timeout has expired perform one final check to - # see if the socket is readable. This helps avoid spurious - # timeouts on AWS Lambda and other FaaS environments. 
- if remaining <= 0: - timed_out = True - timeout = max(min(remaining, _POLL_TIMEOUT), 0) - else: - timeout = _POLL_TIMEOUT - readable = conn.socket_checker.select(sock, read=True, timeout=timeout) - if context.cancelled: - raise _OperationCancelled("hello cancelled") - if readable: - return - if timed_out: - raise socket.timeout("timed out") + timeout = _POLL_TIMEOUT + readable = conn.socket_checker.select(sock, read=True, timeout=timeout) + if conn.cancel_context.cancelled: + raise _OperationCancelled("operation cancelled") + if readable: + return + if timed_out: + raise socket.timeout("timed out") # Errors raised by sockets (and TLS sockets) when in non-blocking mode. diff --git a/pymongo/operations.py b/pymongo/operations.py index 2c48a2994e..4872afa911 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -15,6 +15,7 @@ """Operation class definitions.""" from __future__ import annotations +import enum from typing import ( TYPE_CHECKING, Any, @@ -29,12 +30,12 @@ from bson.raw_bson import RawBSONDocument from pymongo import helpers from pymongo.collation import validate_collation_or_none -from pymongo.common import validate_boolean, validate_is_mapping, validate_list +from pymongo.common import validate_is_mapping, validate_list from pymongo.helpers import _gen_index_name, _index_document, _index_list from pymongo.typings import _CollationIn, _DocumentType, _Pipeline +from pymongo.write_concern import validate_boolean if TYPE_CHECKING: - from bson.son import SON from pymongo.bulk import _Bulk # Hint supports index name, "myIndex", a list of either strings or index pairs: [('x', 1), ('y', -1), 'z''], or a dictionary @@ -44,6 +45,37 @@ _IndexKeyHint = Union[str, _IndexList] +class _Op(str, enum.Enum): + ABORT = "abortTransaction" + AGGREGATE = "aggregate" + COMMIT = "commitTransaction" + COUNT = "count" + CREATE = "create" + CREATE_INDEXES = "createIndexes" + CREATE_SEARCH_INDEXES = "createSearchIndexes" + DELETE = "delete" + DISTINCT = "distinct" + 
DROP = "drop" + DROP_DATABASE = "dropDatabase" + DROP_INDEXES = "dropIndexes" + DROP_SEARCH_INDEXES = "dropSearchIndexes" + END_SESSIONS = "endSessions" + FIND_AND_MODIFY = "findAndModify" + FIND = "find" + INSERT = "insert" + LIST_COLLECTIONS = "listCollections" + LIST_INDEXES = "listIndexes" + LIST_SEARCH_INDEX = "listSearchIndexes" + LIST_DATABASES = "listDatabases" + UPDATE = "update" + UPDATE_INDEX = "updateIndex" + UPDATE_SEARCH_INDEX = "updateSearchIndex" + RENAME = "rename" + GETMORE = "getMore" + KILL_CURSORS = "killCursors" + TEST = "testOperation" + + class InsertOne(Generic[_DocumentType]): """Represents an insert_one operation.""" @@ -54,8 +86,7 @@ def __init__(self, document: _DocumentType) -> None: For use with :meth:`~pymongo.collection.Collection.bulk_write`. - :Parameters: - - `document`: The document to insert. If the document is missing an + :param document: The document to insert. If the document is missing an _id field one will be added. """ self._doc = document @@ -91,11 +122,10 @@ def __init__( For use with :meth:`~pymongo.collection.Collection.bulk_write`. - :Parameters: - - `filter`: A query that matches the document to delete. - - `collation` (optional): An instance of + :param filter: A query that matches the document to delete. + :param collation: An instance of :class:`~pymongo.collation.Collation`. - - `hint` (optional): An index to use to support the query + :param hint: An index to use to support the query predicate specified either by its string name, or in the same format as passed to :meth:`~pymongo.collection.Collection.create_index` (e.g. 
@@ -110,7 +140,7 @@ def __init__( if filter is not None: validate_is_mapping("filter", filter) if hint is not None and not isinstance(hint, str): - self._hint: Union[str, SON[str, Any], None] = helpers._index_document(hint) + self._hint: Union[str, dict[str, Any], None] = helpers._index_document(hint) else: self._hint = hint self._filter = filter @@ -156,11 +186,10 @@ def __init__( For use with :meth:`~pymongo.collection.Collection.bulk_write`. - :Parameters: - - `filter`: A query that matches the documents to delete. - - `collation` (optional): An instance of + :param filter: A query that matches the documents to delete. + :param collation: An instance of :class:`~pymongo.collation.Collation`. - - `hint` (optional): An index to use to support the query + :param hint: An index to use to support the query predicate specified either by its string name, or in the same format as passed to :meth:`~pymongo.collection.Collection.create_index` (e.g. @@ -175,7 +204,7 @@ def __init__( if filter is not None: validate_is_mapping("filter", filter) if hint is not None and not isinstance(hint, str): - self._hint: Union[str, SON[str, Any], None] = helpers._index_document(hint) + self._hint: Union[str, dict[str, Any], None] = helpers._index_document(hint) else: self._hint = hint self._filter = filter @@ -223,14 +252,13 @@ def __init__( For use with :meth:`~pymongo.collection.Collection.bulk_write`. - :Parameters: - - `filter`: A query that matches the document to replace. - - `replacement`: The new document. - - `upsert` (optional): If ``True``, perform an insert if no documents + :param filter: A query that matches the document to replace. + :param replacement: The new document. + :param upsert: If ``True``, perform an insert if no documents match the filter. - - `collation` (optional): An instance of + :param collation: An instance of :class:`~pymongo.collation.Collation`. 
- - `hint` (optional): An index to use to support the query + :param hint: An index to use to support the query predicate specified either by its string name, or in the same format as passed to :meth:`~pymongo.collection.Collection.create_index` (e.g. @@ -247,7 +275,7 @@ def __init__( if upsert is not None: validate_boolean("upsert", upsert) if hint is not None and not isinstance(hint, str): - self._hint: Union[str, SON[str, Any], None] = helpers._index_document(hint) + self._hint: Union[str, dict[str, Any], None] = helpers._index_document(hint) else: self._hint = hint self._filter = filter @@ -267,7 +295,13 @@ def _add_to_bulk(self, bulkobj: _Bulk) -> None: def __eq__(self, other: Any) -> bool: if type(other) == type(self): - return (other._filter, other._doc, other._upsert, other._collation, other._hint,) == ( + return ( + other._filter, + other._doc, + other._upsert, + other._collation, + other._hint, + ) == ( self._filter, self._doc, self._upsert, @@ -311,7 +345,7 @@ def __init__( if array_filters is not None: validate_list("array_filters", array_filters) if hint is not None and not isinstance(hint, str): - self._hint: Union[str, SON[str, Any], None] = helpers._index_document(hint) + self._hint: Union[str, dict[str, Any], None] = helpers._index_document(hint) else: self._hint = hint @@ -370,16 +404,15 @@ def __init__( For use with :meth:`~pymongo.collection.Collection.bulk_write`. - :Parameters: - - `filter`: A query that matches the document to update. - - `update`: The modifications to apply. - - `upsert` (optional): If ``True``, perform an insert if no documents + :param filter: A query that matches the document to update. + :param update: The modifications to apply. + :param upsert: If ``True``, perform an insert if no documents match the filter. - - `collation` (optional): An instance of + :param collation: An instance of :class:`~pymongo.collation.Collation`. 
- - `array_filters` (optional): A list of filters specifying which + :param array_filters: A list of filters specifying which array elements an update should apply. - - `hint` (optional): An index to use to support the query + :param hint: An index to use to support the query predicate specified either by its string name, or in the same format as passed to :meth:`~pymongo.collection.Collection.create_index` (e.g. @@ -428,16 +461,15 @@ def __init__( For use with :meth:`~pymongo.collection.Collection.bulk_write`. - :Parameters: - - `filter`: A query that matches the documents to update. - - `update`: The modifications to apply. - - `upsert` (optional): If ``True``, perform an insert if no documents + :param filter: A query that matches the documents to update. + :param update: The modifications to apply. + :param upsert: If ``True``, perform an insert if no documents match the filter. - - `collation` (optional): An instance of + :param collation: An instance of :class:`~pymongo.collation.Collation`. - - `array_filters` (optional): A list of filters specifying which + :param array_filters: A list of filters specifying which array elements an update should apply. - - `hint` (optional): An index to use to support the query + :param hint: An index to use to support the query predicate specified either by its string name, or in the same format as passed to :meth:`~pymongo.collection.Collection.create_index` (e.g. @@ -520,10 +552,9 @@ def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: See the MongoDB documentation for a full list of supported options by server version. - :Parameters: - - `keys`: a single key or a list containing (key, direction) pairs + :param keys: a single key or a list containing (key, direction) pairs or keys specifying the index to create. - - `**kwargs` (optional): any additional index creation + :param kwargs: any additional index creation options (see the above list) should be passed as keyword arguments. 
@@ -557,23 +588,34 @@ class SearchIndexModel: __slots__ = ("__document",) - def __init__(self, definition: Mapping[str, Any], name: Optional[str] = None) -> None: + def __init__( + self, + definition: Mapping[str, Any], + name: Optional[str] = None, + type: Optional[str] = None, + **kwargs: Any, + ) -> None: """Create a Search Index instance. For use with :meth:`~pymongo.collection.Collection.create_search_index` and :meth:`~pymongo.collection.Collection.create_search_indexes`. - :Parameters: - - `definition` - The definition for this index. - - `name` (optional) - The name for this index, if present. - - .. versionadded:: 4.5 + :param definition: The definition for this index. + :param name: The name for this index, if present. + :param type: The type for this index which defaults to "search". Alternative values include "vectorSearch". + :param kwargs: Keyword arguments supplying any additional options. .. note:: Search indexes require a MongoDB server version 7.0+ Atlas cluster. + .. versionadded:: 4.5 + .. versionchanged:: 4.7 + Added the type and kwargs arguments. """ + self.__document: dict[str, Any] = {} if name is not None: - self.__document = dict(name=name, definition=definition) - else: - self.__document = dict(definition=definition) + self.__document["name"] = name + self.__document["definition"] = definition + if type is not None: + self.__document["type"] = type + self.__document.update(kwargs) @property def document(self) -> Mapping[str, Any]: diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index 003b05647c..9e9ead61fc 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -16,6 +16,7 @@ from __future__ import annotations +import sys import threading import time import weakref @@ -36,12 +37,11 @@ def __init__( If the target's return value is false, the executor stops. - :Parameters: - - `interval`: Seconds between calls to `target`. 
- - `min_interval`: Minimum seconds between calls if `wake` is + :param interval: Seconds between calls to `target`. + :param min_interval: Minimum seconds between calls if `wake` is called very often. - - `target`: A function. - - `name`: A name to give the underlying thread. + :param target: A function. + :param name: A name to give the underlying thread. """ # threading.Event and its internal condition variable are expensive # in Python 2, see PYTHON-983. Use a boolean to know when to wake. @@ -92,7 +92,15 @@ def open(self) -> None: thread.daemon = True self._thread = weakref.proxy(thread) _register_executor(self) - thread.start() + # Mitigation to RuntimeError firing when thread starts on shutdown + # https://github.com/python/cpython/issues/114570 + try: + thread.start() + except RuntimeError as e: + if "interpreter shutdown" in str(e) or sys.is_finalizing(): + self._thread = None + return + raise def close(self, dummy: Any = None) -> None: """Stop. To restart, call open(). diff --git a/pymongo/pool.py b/pymongo/pool.py index cdafb2cc2c..379127deee 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -17,6 +17,7 @@ import collections import contextlib import copy +import logging import os import platform import socket @@ -25,6 +26,7 @@ import threading import time import weakref +from pathlib import Path from typing import ( TYPE_CHECKING, Any, @@ -39,8 +41,7 @@ import bson from bson import DEFAULT_CODEC_OPTIONS -from bson.son import SON -from pymongo import __version__, _csot, auth, helpers +from pymongo import __version__, _csot, helpers from pymongo.client_session import _validate_session_write_concern from pymongo.common import ( MAX_BSON_SIZE, @@ -54,7 +55,7 @@ ORDERED_TYPES, WAIT_QUEUE_TIMEOUT, ) -from pymongo.errors import ( +from pymongo.errors import ( # type:ignore[attr-defined] AutoReconnect, ConfigurationError, ConnectionFailure, @@ -71,6 +72,12 @@ from pymongo.hello import Hello, HelloCompat from pymongo.helpers import _handle_reauth from 
pymongo.lock import _create_lock +from pymongo.logger import ( + _CONNECTION_LOGGER, + _ConnectionStatusMessage, + _debug_log, + _verbose_connection_error_reason, +) from pymongo.monitoring import ( ConnectionCheckOutFailedReason, ConnectionClosedReason, @@ -114,7 +121,7 @@ def _set_non_inheritable_non_atomic(fd: int) -> None: except ImportError: # Windows, various platforms we don't claim to support - # (Jython, IronPython, ...), systems that don't provide + # (Jython, IronPython, ..), systems that don't provide # everything we need from fcntl, etc. def _set_non_inheritable_non_atomic(fd: int) -> None: # noqa: ARG001 """Dummy function for platforms that don't provide fcntl.""" @@ -179,11 +186,7 @@ def _set_keepalive_times(sock: socket.socket) -> None: _set_tcp_option(sock, "TCP_KEEPCNT", _MAX_TCP_KEEPCNT) -_METADATA: SON[str, Any] = SON( - [ - ("driver", SON([("name", "PyMongo"), ("version", __version__)])), - ] -) +_METADATA: dict[str, Any] = {"driver": {"name": "PyMongo", "version": __version__}} if sys.platform.startswith("linux"): # platform.linux_distribution was deprecated in Python 3.5 @@ -191,61 +194,52 @@ def _set_keepalive_times(sock: socket.socket) -> None: # raises DeprecationWarning # DeprecationWarning: dist() and linux_distribution() functions are deprecated in Python 3.5 _name = platform.system() - _METADATA["os"] = SON( - [ - ("type", _name), - ("name", _name), - ("architecture", platform.machine()), - # Kernel version (e.g. 4.4.0-17-generic). - ("version", platform.release()), - ] - ) + _METADATA["os"] = { + "type": _name, + "name": _name, + "architecture": platform.machine(), + # Kernel version (e.g. 4.4.0-17-generic). + "version": platform.release(), + } elif sys.platform == "darwin": - _METADATA["os"] = SON( - [ - ("type", platform.system()), - ("name", platform.system()), - ("architecture", platform.machine()), - # (mac|i|tv)OS(X) version (e.g. 10.11.6) instead of darwin - # kernel version. 
- ("version", platform.mac_ver()[0]), - ] - ) + _METADATA["os"] = { + "type": platform.system(), + "name": platform.system(), + "architecture": platform.machine(), + # (mac|i|tv)OS(X) version (e.g. 10.11.6) instead of darwin + # kernel version. + "version": platform.mac_ver()[0], + } elif sys.platform == "win32": - _METADATA["os"] = SON( - [ - ("type", platform.system()), - # "Windows XP", "Windows 7", "Windows 10", etc. - ("name", " ".join((platform.system(), platform.release()))), - ("architecture", platform.machine()), - # Windows patch level (e.g. 5.1.2600-SP3) - ("version", "-".join(platform.win32_ver()[1:3])), - ] - ) + _ver = sys.getwindowsversion() + _METADATA["os"] = { + "type": "Windows", + "name": "Windows", + # Avoid using platform calls, see PYTHON-4455. + "architecture": os.environ.get("PROCESSOR_ARCHITECTURE") or platform.machine(), + # Windows patch level (e.g. 10.0.17763-SP0). + "version": ".".join(map(str, _ver[:3])) + f"-SP{_ver[-1] or '0'}", + } elif sys.platform.startswith("java"): _name, _ver, _arch = platform.java_ver()[-1] - _METADATA["os"] = SON( - [ - # Linux, Windows 7, Mac OS X, etc. - ("type", _name), - ("name", _name), - # x86, x86_64, AMD64, etc. - ("architecture", _arch), - # Linux kernel version, OSX version, etc. - ("version", _ver), - ] - ) + _METADATA["os"] = { + # Linux, Windows 7, Mac OS X, etc. + "type": _name, + "name": _name, + # x86, x86_64, AMD64, etc. + "architecture": _arch, + # Linux kernel version, OSX version, etc. + "version": _ver, + } else: # Get potential alias (e.g. 
SunOS 5.11 becomes Solaris 2.11) _aliased = platform.system_alias(platform.system(), platform.release(), platform.version()) - _METADATA["os"] = SON( - [ - ("type", platform.system()), - ("name", " ".join([part for part in _aliased[:2] if part])), - ("architecture", platform.machine()), - ("version", _aliased[2]), - ] - ) + _METADATA["os"] = { + "type": platform.system(), + "name": " ".join([part for part in _aliased[:2] if part]), + "architecture": platform.machine(), + "version": _aliased[2], + } if platform.python_implementation().startswith("PyPy"): _METADATA["platform"] = " ".join( @@ -268,6 +262,25 @@ def _set_keepalive_times(sock: socket.socket) -> None: (platform.python_implementation(), ".".join(map(str, sys.version_info))) ) +DOCKER_ENV_PATH = "/.dockerenv" +ENV_VAR_K8S = "KUBERNETES_SERVICE_HOST" + +RUNTIME_NAME_DOCKER = "docker" +ORCHESTRATOR_NAME_K8S = "kubernetes" + + +def get_container_env_info() -> dict[str, str]: + """Returns the runtime and orchestrator of a container. + If neither value is present, the metadata client.env.container field will be omitted.""" + container = {} + + if Path(DOCKER_ENV_PATH).exists(): + container["runtime"] = RUNTIME_NAME_DOCKER + if os.getenv(ENV_VAR_K8S): + container["orchestrator"] = ORCHESTRATOR_NAME_K8S + + return container + def _is_lambda() -> bool: if os.getenv("AWS_LAMBDA_RUNTIME_API"): @@ -307,6 +320,9 @@ def _getenv_int(key: str) -> Optional[int]: def _metadata_env() -> dict[str, Any]: env: dict[str, Any] = {} + container = get_container_env_info() + if container: + env["container"] = container # Skip if multiple (or no) envs are matched. 
if (_is_lambda(), _is_azure_func(), _is_gcp_func(), _is_vercel()).count(True) != 1: return env @@ -523,7 +539,7 @@ def __init__( # 'name': 'PyMongo|MyDriver', # 'version': '4.2.0|1.2.3', # }, - # 'platform': 'CPython 3.7.0|MyPlatform' + # 'platform': 'CPython 3.8.0|MyPlatform' # } if driver: if driver.name: @@ -659,7 +675,7 @@ def _compression_settings(self) -> Optional[CompressionSettings]: return self.__compression_settings @property - def metadata(self) -> SON[str, Any]: + def metadata(self) -> dict[str, Any]: """A dict of metadata about the application, driver, os, and platform.""" return self.__metadata.copy() @@ -691,11 +707,10 @@ def cancelled(self) -> bool: class Connection: """Store a connection with some metadata. - :Parameters: - - `conn`: a raw connection object - - `pool`: a Pool instance - - `address`: the server's (host, port) - - `id`: the id of this socket in it's pool + :param conn: a raw connection object + :param pool: a Pool instance + :param address: the server's (host, port) + :param id: the id of this socket in it's pool """ def __init__( @@ -719,6 +734,7 @@ def __init__( self.op_msg_enabled = False self.listeners = pool.opts._event_listeners self.enabled_for_cmap = pool.enabled_for_cmap + self.enabled_for_logging = pool.enabled_for_logging self.compression_settings = pool.opts._compression_settings self.compression_context: Union[SnappyContext, ZlibContext, ZstdContext, None] = None self.socket_checker: SocketChecker = SocketChecker() @@ -732,14 +748,12 @@ def __init__( self.pool_gen = pool.gen self.generation = self.pool_gen.get_overall() self.ready = False - self.cancel_context: Optional[_CancellationContext] = None - if not pool.handshake: - # This is a Monitor connection. - self.cancel_context = _CancellationContext() + self.cancel_context: _CancellationContext = _CancellationContext() self.opts = pool.opts self.more_to_come: bool = False # For load balancer support. 
self.service_id: Optional[ObjectId] = None + self.server_connection_id: Optional[int] = None # When executing a transaction in load balancing mode, this flag is # set to true to indicate that the session now owns the connection. self.pinned_txn = False @@ -747,6 +761,8 @@ def __init__( self.active = False self.last_timeout = self.opts.socket_timeout self.connect_rtt = 0.0 + self._client_id = pool._client_id + self.creation_time = time.monotonic() def set_conn_timeout(self, timeout: Optional[float]) -> None: """Cache last timeout to avoid duplicate calls to conn.settimeout.""" @@ -801,14 +817,14 @@ def unpin(self) -> None: else: self.close_conn(ConnectionClosedReason.STALE) - def hello_cmd(self) -> SON[str, Any]: + def hello_cmd(self) -> dict[str, Any]: # Handshake spec requires us to use OP_MSG+hello command for the # initial handshake in load balanced or stable API mode. if self.opts.server_api or self.hello_ok or self.opts.load_balanced: self.op_msg_enabled = True - return SON([(HelloCompat.CMD, 1)]) + return {HelloCompat.CMD: 1} else: - return SON([(HelloCompat.LEGACY_CMD, 1), ("helloOk", True)]) + return {HelloCompat.LEGACY_CMD: 1, "helloOk": True} def hello(self) -> Hello[dict[str, Any]]: return self._hello(None, None, None) @@ -845,6 +861,8 @@ def _hello( if creds: if creds.mechanism == "DEFAULT" and creds.username: cmd["saslSupportedMechs"] = creds.source + "." 
+ creds.username + from pymongo import auth + auth_ctx = auth._AuthContext.from_credentials(creds, self.address) if auth_ctx: speculative_authenticate = auth_ctx.speculate_command() @@ -864,7 +882,10 @@ def _hello( self.max_bson_size = hello.max_bson_size self.max_message_size = hello.max_message_size self.max_write_batch_size = hello.max_write_batch_size - self.supports_sessions = hello.logical_session_timeout_minutes is not None + self.supports_sessions = ( + hello.logical_session_timeout_minutes is not None and hello.is_readable + ) + self.logical_session_timeout_minutes: Optional[int] = hello.logical_session_timeout_minutes self.hello_ok = hello.hello_ok self.is_repl = hello.server_type in ( SERVER_TYPE.RSPrimary, @@ -880,6 +901,7 @@ def _hello( self.compression_context = ctx self.op_msg_enabled = True + self.server_connection_id = hello.connection_id if creds: self.negotiated_mechs = hello.sasl_supported_mechs if auth_ctx: @@ -926,23 +948,22 @@ def command( ) -> dict[str, Any]: """Execute a command or raise an error. - :Parameters: - - `dbname`: name of the database on which to run the command - - `spec`: a command document as a dict, SON, or mapping object - - `read_preference`: a read preference - - `codec_options`: a CodecOptions instance - - `check`: raise OperationFailure if there are errors - - `allowable_errors`: errors to ignore if `check` is True - - `read_concern`: The read concern for this command. - - `write_concern`: The write concern for this command. - - `parse_write_concern_error`: Whether to parse the + :param dbname: name of the database on which to run the command + :param spec: a command document as a dict, SON, or mapping object + :param read_preference: a read preference + :param codec_options: a CodecOptions instance + :param check: raise OperationFailure if there are errors + :param allowable_errors: errors to ignore if `check` is True + :param read_concern: The read concern for this command. 
+ :param write_concern: The write concern for this command. + :param parse_write_concern_error: Whether to parse the ``writeConcernError`` field in the command response. - - `collation`: The collation for this command. - - `session`: optional ClientSession instance. - - `client`: optional MongoClient for gossipping $clusterTime. - - `retryable_write`: True if this command is a retryable write. - - `publish_events`: Should we publish events for this command? - - `user_fields` (optional): Response fields that should be decoded + :param collation: The collation for this command. + :param session: optional ClientSession instance. + :param client: optional MongoClient for gossipping $clusterTime. + :param retryable_write: True if this command is a retryable write. + :param publish_events: Should we publish events for this command? + :param user_fields: Response fields that should be decoded using the TypeDecoders from codec_options, passed to bson._decode_all_selective. """ @@ -951,7 +972,7 @@ def command( # Ensure command name remains in first place. if not isinstance(spec, ORDERED_TYPES): # type:ignore[arg-type] - spec = SON(spec) + spec = dict(spec) if not (write_concern is None or write_concern.acknowledged or collation is None): raise ConfigurationError("Collation is unsupported for unacknowledged writes.") @@ -1034,9 +1055,8 @@ def unack_write(self, msg: bytes, max_doc_size: int) -> None: Can raise ConnectionFailure or InvalidDocument. - :Parameters: - - `msg`: bytes, an OP_MSG message. - - `max_doc_size`: size in bytes of the largest document in `msg`. + :param msg: bytes, an OP_MSG message. + :param max_doc_size: size in bytes of the largest document in `msg`. """ self._raise_if_not_writable(True) self.send_message(msg, max_doc_size) @@ -1048,9 +1068,8 @@ def write_command( Can raise ConnectionFailure or OperationFailure. - :Parameters: - - `request_id`: an int. - - `msg`: bytes, the command message. + :param request_id: an int. 
+ :param msg: bytes, the command message. """ self.send_message(msg, 0) reply = self.receive_message(request_id) @@ -1075,11 +1094,24 @@ def authenticate(self, reauthenticate: bool = False) -> None: if not self.ready: creds = self.opts._credentials if creds: + from pymongo import auth + auth.authenticate(creds, self, reauthenticate=reauthenticate) self.ready = True + duration = time.monotonic() - self.creation_time if self.enabled_for_cmap: assert self.listeners is not None - self.listeners.publish_connection_ready(self.address, self.id) + self.listeners.publish_connection_ready(self.address, self.id, duration) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CONN_READY, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=self.id, + durationMS=duration, + ) def validate_session( self, client: Optional[MongoClient], session: Optional[ClientSession] @@ -1097,17 +1129,28 @@ def close_conn(self, reason: Optional[str]) -> None: if self.closed: return self._close_conn() - if reason and self.enabled_for_cmap: - assert self.listeners is not None - self.listeners.publish_connection_closed(self.address, self.id, reason) + if reason: + if self.enabled_for_cmap: + assert self.listeners is not None + self.listeners.publish_connection_closed(self.address, self.id, reason) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CONN_CLOSED, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=self.id, + reason=_verbose_connection_error_reason(reason), + error=reason, + ) def _close_conn(self) -> None: """Close this connection.""" if self.closed: return self.closed = True - if self.cancel_context: - self.cancel_context.cancel() + self.cancel_context.cancel() # Note: We 
catch exceptions to avoid spurious errors on interpreter # shutdown. try: @@ -1155,7 +1198,7 @@ def _raise_connection_failure(self, error: BaseException) -> NoReturn: # main thread. # # But in Gevent and Eventlet, the polling mechanism (epoll, kqueue, - # ...) is called in Python code, which experiences the signal as a + # ..) is called in Python code, which experiences the signal as a # KeyboardInterrupt from the start, rather than as an initial # socket.error, so we catch that, close the socket, and reraise it. # @@ -1355,12 +1398,17 @@ class PoolState: # Do *not* explicitly inherit from object or Jython won't call __del__ # http://bugs.jython.org/issue1057 class Pool: - def __init__(self, address: _Address, options: PoolOptions, handshake: bool = True): + def __init__( + self, + address: _Address, + options: PoolOptions, + handshake: bool = True, + client_id: Optional[ObjectId] = None, + ): """ - :Parameters: - - `address`: a (hostname, port) tuple - - `options`: a PoolOptions instance - - `handshake`: whether to call hello for each new Connection + :param address: a (hostname, port) tuple + :param options: a PoolOptions instance + :param handshake: whether to call hello for each new Connection """ if options.pause_enabled: self.state = PoolState.PAUSED @@ -1373,6 +1421,7 @@ def __init__(self, address: _Address, options: PoolOptions, handshake: bool = Tr # and returned to pool from the left side. Stale sockets removed # from the right side. self.conns: collections.deque = collections.deque() + self.active_contexts: set[_CancellationContext] = set() self.lock = _create_lock() self.active_sockets = 0 # Monotonically increasing connection ID required for CMAP Events. @@ -1394,6 +1443,7 @@ def __init__(self, address: _Address, options: PoolOptions, handshake: bool = Tr and self.opts._event_listeners is not None and self.opts._event_listeners.enabled_for_cmap ) + self.enabled_for_logging = self.handshake # The first portion of the wait queue. 
# Enforces: maxPoolSize @@ -1409,13 +1459,23 @@ def __init__(self, address: _Address, options: PoolOptions, handshake: bool = Tr self._max_connecting_cond = threading.Condition(self.lock) self._max_connecting = self.opts.max_connecting self._pending = 0 + self._client_id = client_id if self.enabled_for_cmap: assert self.opts._event_listeners is not None self.opts._event_listeners.publish_pool_created( self.address, self.opts.non_default_options ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.POOL_CREATED, + serverHost=self.address[0], + serverPort=self.address[1], + **self.opts.non_default_options, + ) # Similar to active_sockets but includes threads in the wait queue. - self.operation_count = 0 + self.operation_count: int = 0 # Retain references to pinned connections to prevent the CPython GC # from thinking that a cursor's pinned connection can be GC'd when the # cursor is GC'd (see PYTHON-2751). 
@@ -1431,13 +1491,25 @@ def ready(self) -> None: if self.enabled_for_cmap: assert self.opts._event_listeners is not None self.opts._event_listeners.publish_pool_ready(self.address) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.POOL_READY, + serverHost=self.address[0], + serverPort=self.address[1], + ) @property def closed(self) -> bool: return self.state == PoolState.CLOSED def _reset( - self, close: bool, pause: bool = True, service_id: Optional[ObjectId] = None + self, + close: bool, + pause: bool = True, + service_id: Optional[ObjectId] = None, + interrupt_connections: bool = False, ) -> None: old_state = self.state with self.size_cond: @@ -1470,6 +1542,10 @@ def _reset( self._max_connecting_cond.notify_all() self.size_cond.notify_all() + if interrupt_connections: + for context in self.active_contexts: + context.cancel() + listeners = self.opts._event_listeners # CMAP spec says that close() MUST close sockets before publishing the # PoolClosedEvent but that reset() SHOULD close sockets *after* @@ -1480,10 +1556,32 @@ def _reset( if self.enabled_for_cmap: assert listeners is not None listeners.publish_pool_closed(self.address) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.POOL_CLOSED, + serverHost=self.address[0], + serverPort=self.address[1], + ) else: - if old_state != PoolState.PAUSED and self.enabled_for_cmap: - assert listeners is not None - listeners.publish_pool_cleared(self.address, service_id=service_id) + if old_state != PoolState.PAUSED: + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_pool_cleared( + self.address, + service_id=service_id, + interrupt_connections=interrupt_connections, + ) + if self.enabled_for_logging and 
_CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.POOL_CLEARED, + serverHost=self.address[0], + serverPort=self.address[1], + serviceId=service_id, + ) for conn in sockets: conn.close_conn(ConnectionClosedReason.STALE) @@ -1496,8 +1594,10 @@ def update_is_writable(self, is_writable: Optional[bool]) -> None: for _socket in self.conns: _socket.update_is_writable(self.is_writable) - def reset(self, service_id: Optional[ObjectId] = None) -> None: - self._reset(close=False, service_id=service_id) + def reset( + self, service_id: Optional[ObjectId] = None, interrupt_connections: bool = False + ) -> None: + self._reset(close=False, service_id=service_id, interrupt_connections=interrupt_connections) def reset_without_pause(self) -> None: self._reset(close=False, pause=False) @@ -1553,6 +1653,7 @@ def remove_stale_sockets(self, reference_generation: int) -> None: conn.close_conn(ConnectionClosedReason.STALE) return self.conns.appendleft(conn) + self.active_contexts.discard(conn.cancel_context) finally: if incremented: # Notify after adding the socket to the pool. 
@@ -1580,6 +1681,15 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect if self.enabled_for_cmap: assert listeners is not None listeners.publish_connection_created(self.address, conn_id) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CONN_CREATED, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn_id, + ) try: sock = _configured_socket(self.address, self.opts) @@ -1589,7 +1699,17 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect listeners.publish_connection_closed( self.address, conn_id, ConnectionClosedReason.ERROR ) - + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CONN_CLOSED, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn_id, + reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), + error=ConnectionClosedReason.ERROR, + ) if isinstance(error, (IOError, OSError, SSLError)): details = _get_timeout_details(self.opts) _raise_connection_failure(self.address, error, timeout_details=details) @@ -1597,6 +1717,8 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect raise conn = Connection(sock, self, self.address, conn_id) # type: ignore[arg-type] + with self.lock: + self.active_contexts.add(conn.cancel_context) try: if self.handshake: conn.hello() @@ -1626,20 +1748,41 @@ def checkout(self, handler: Optional[_MongoClientErrorHandler] = None) -> Iterat Can raise ConnectionFailure or OperationFailure. - :Parameters: - - `handler` (optional): A _MongoClientErrorHandler. + :param handler: A _MongoClientErrorHandler. 
""" listeners = self.opts._event_listeners + checkout_started_time = time.monotonic() if self.enabled_for_cmap: assert listeners is not None listeners.publish_connection_check_out_started(self.address) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_STARTED, + serverHost=self.address[0], + serverPort=self.address[1], + ) - conn = self._get_conn(handler=handler) + conn = self._get_conn(checkout_started_time, handler=handler) + duration = time.monotonic() - checkout_started_time if self.enabled_for_cmap: assert listeners is not None - listeners.publish_connection_checked_out(self.address, conn.id) + listeners.publish_connection_checked_out(self.address, conn.id, duration) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_SUCCEEDED, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn.id, + durationMS=duration, + ) try: + with self.lock: + self.active_contexts.add(conn.cancel_context) yield conn except BaseException: # Exception in caller. Ensure the connection gets returned. 
@@ -1666,19 +1809,35 @@ def checkout(self, handler: Optional[_MongoClientErrorHandler] = None) -> Iterat elif conn.active: self.checkin(conn) - def _raise_if_not_ready(self, emit_event: bool) -> None: + def _raise_if_not_ready(self, checkout_started_time: float, emit_event: bool) -> None: if self.state != PoolState.READY: - if self.enabled_for_cmap and emit_event: - assert self.opts._event_listeners is not None - self.opts._event_listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.CONN_ERROR - ) + if emit_event: + duration = time.monotonic() - checkout_started_time + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.CONN_ERROR, duration + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + serverHost=self.address[0], + serverPort=self.address[1], + reason="An error occurred while trying to establish a new connection", + error=ConnectionCheckOutFailedReason.CONN_ERROR, + durationMS=duration, + ) + details = _get_timeout_details(self.opts) _raise_connection_failure( self.address, AutoReconnect("connection pool paused"), timeout_details=details ) - def _get_conn(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connection: + def _get_conn( + self, checkout_started_time: float, handler: Optional[_MongoClientErrorHandler] = None + ) -> Connection: """Get or create a Connection. Can raise ConnectionFailure.""" # We use the pid here to avoid issues with fork / multiprocessing. 
# See test.test_client:TestClient.test_fork for an example of @@ -1687,10 +1846,22 @@ def _get_conn(self, handler: Optional[_MongoClientErrorHandler] = None) -> Conne self.reset_without_pause() if self.closed: + duration = time.monotonic() - checkout_started_time if self.enabled_for_cmap: assert self.opts._event_listeners is not None self.opts._event_listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.POOL_CLOSED + self.address, ConnectionCheckOutFailedReason.POOL_CLOSED, duration + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + serverHost=self.address[0], + serverPort=self.address[1], + reason="Connection pool was closed", + error=ConnectionCheckOutFailedReason.POOL_CLOSED, + durationMS=duration, ) raise _PoolClosedError( "Attempted to check out a connection from closed connection pool" @@ -1708,15 +1879,15 @@ def _get_conn(self, handler: Optional[_MongoClientErrorHandler] = None) -> Conne deadline = None with self.size_cond: - self._raise_if_not_ready(emit_event=True) + self._raise_if_not_ready(checkout_started_time, emit_event=True) while not (self.requests < self.max_pool_size): if not _cond_wait(self.size_cond, deadline): # Timed out, notify the next thread to ensure a # timeout doesn't consume the condition. if self.requests < self.max_pool_size: self.size_cond.notify() - self._raise_wait_queue_timeout() - self._raise_if_not_ready(emit_event=True) + self._raise_wait_queue_timeout(checkout_started_time) + self._raise_if_not_ready(checkout_started_time, emit_event=True) self.requests += 1 # We've now acquired the semaphore and must release it on error. 
@@ -1727,12 +1898,11 @@ def _get_conn(self, handler: Optional[_MongoClientErrorHandler] = None) -> Conne with self.lock: self.active_sockets += 1 incremented = True - while conn is None: # CMAP: we MUST wait for either maxConnecting OR for a socket # to be checked back into the pool. with self._max_connecting_cond: - self._raise_if_not_ready(emit_event=False) + self._raise_if_not_ready(checkout_started_time, emit_event=False) while not (self.conns or self._pending < self._max_connecting): if not _cond_wait(self._max_connecting_cond, deadline): # Timed out, notify the next thread to ensure a @@ -1740,8 +1910,8 @@ def _get_conn(self, handler: Optional[_MongoClientErrorHandler] = None) -> Conne if self.conns or self._pending < self._max_connecting: self._max_connecting_cond.notify() emitted_event = True - self._raise_wait_queue_timeout() - self._raise_if_not_ready(emit_event=False) + self._raise_wait_queue_timeout(checkout_started_time) + self._raise_if_not_ready(checkout_started_time, emit_event=False) try: conn = self.conns.popleft() @@ -1768,11 +1938,24 @@ def _get_conn(self, handler: Optional[_MongoClientErrorHandler] = None) -> Conne self.active_sockets -= 1 self.size_cond.notify() - if self.enabled_for_cmap and not emitted_event: - assert self.opts._event_listeners is not None - self.opts._event_listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.CONN_ERROR - ) + if not emitted_event: + duration = time.monotonic() - checkout_started_time + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.CONN_ERROR, duration + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + serverHost=self.address[0], + serverPort=self.address[1], + reason="An error 
occurred while trying to establish a new connection", + error=ConnectionCheckOutFailedReason.CONN_ERROR, + durationMS=duration, + ) raise conn.active = True @@ -1781,8 +1964,7 @@ def _get_conn(self, handler: Optional[_MongoClientErrorHandler] = None) -> Conne def checkin(self, conn: Connection) -> None: """Return the connection to the pool, or if it's closed discard it. - :Parameters: - - `conn`: The connection to check into the pool. + :param conn: The connection to check into the pool. """ txn = conn.pinned_txn cursor = conn.pinned_cursor @@ -1791,9 +1973,20 @@ def checkin(self, conn: Connection) -> None: conn.pinned_cursor = False self.__pinned_sockets.discard(conn) listeners = self.opts._event_listeners + with self.lock: + self.active_contexts.discard(conn.cancel_context) if self.enabled_for_cmap: assert listeners is not None listeners.publish_connection_checked_in(self.address, conn.id) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKEDIN, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn.id, + ) if self.pid != os.getpid(): self.reset_without_pause() else: @@ -1806,6 +1999,17 @@ def checkin(self, conn: Connection) -> None: listeners.publish_connection_closed( self.address, conn.id, ConnectionClosedReason.ERROR ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CONN_CLOSED, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn.id, + reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), + error=ConnectionClosedReason.ERROR, + ) else: with self.lock: # Hold the lock to ensure this section does not race with @@ -1865,12 +2069,24 @@ def _perished(self, conn: Connection) -> bool: return False - def 
_raise_wait_queue_timeout(self) -> NoReturn: + def _raise_wait_queue_timeout(self, checkout_started_time: float) -> NoReturn: listeners = self.opts._event_listeners + duration = time.monotonic() - checkout_started_time if self.enabled_for_cmap: assert listeners is not None listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.TIMEOUT + self.address, ConnectionCheckOutFailedReason.TIMEOUT, duration + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + serverHost=self.address[0], + serverPort=self.address[1], + reason="Wait queue timeout elapsed without a connection becoming available", + error=ConnectionCheckOutFailedReason.TIMEOUT, + durationMS=duration, ) timeout = _csot.get_timeout() or self.opts.wait_queue_timeout if self.opts.load_balanced: diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 6657937e99..b08588daff 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -25,16 +25,13 @@ from ipaddress import ip_address as _ip_address from typing import TYPE_CHECKING, Any, Callable, Optional, TypeVar, Union -from cryptography.x509 import load_der_x509_certificate as _load_der_x509_certificate +import cryptography.x509 as x509 +import service_identity from OpenSSL import SSL as _SSL from OpenSSL import crypto as _crypto -from service_identity import CertificateError as _SICertificateError -from service_identity import VerificationError as _SIVerificationError -from service_identity.pyopenssl import verify_hostname as _verify_hostname -from service_identity.pyopenssl import verify_ip_address as _verify_ip_address from pymongo.errors import ConfigurationError as _ConfigurationError -from pymongo.errors import _CertificateError +from pymongo.errors import _CertificateError # type:ignore[attr-defined] from pymongo.ocsp_cache import 
_OCSPCache from pymongo.ocsp_support import _load_trusted_ca_certs, _ocsp_callback from pymongo.socket_checker import SocketChecker as _SocketChecker @@ -44,7 +41,6 @@ if TYPE_CHECKING: from ssl import VerifyMode - from cryptography.x509 import Certificate _T = TypeVar("_T") @@ -184,7 +180,7 @@ class _CallbackData: """Data class which is passed to the OCSP callback.""" def __init__(self) -> None: - self.trusted_ca_certs: Optional[list[Certificate]] = None + self.trusted_ca_certs: Optional[list[x509.Certificate]] = None self.check_ocsp_endpoint: Optional[bool] = None self.ocsp_response_cache = _OCSPCache() @@ -270,7 +266,7 @@ def __get_options(self) -> None: return self._ctx.set_options(0) def __set_options(self, value: int) -> None: - # Explcitly convert to int, since newer CPython versions + # Explicitly convert to int, since newer CPython versions # use enum.IntFlag for options. The values are the same # regardless of implementation. self._ctx.set_options(int(value)) @@ -336,11 +332,12 @@ def _load_wincerts(self, store: str) -> None: """Attempt to load CA certs from Windows trust store.""" cert_store = self._ctx.get_cert_store() oid = _stdlibssl.Purpose.SERVER_AUTH.oid + for cert, encoding, trust in _stdlibssl.enum_certificates(store): # type: ignore if encoding == "x509_asn": if trust is True or oid in trust: cert_store.add_cert( - _crypto.X509.from_cryptography(_load_der_x509_certificate(cert)) + _crypto.X509.from_cryptography(x509.load_der_x509_certificate(cert)) ) def load_default_certs(self) -> None: @@ -404,11 +401,16 @@ def wrap_socket( # XXX: Do this in a callback registered with # SSLContext.set_info_callback? See Twisted for an example. 
if self.check_hostname and server_hostname is not None: + from service_identity import pyopenssl + try: if _is_ip_address(server_hostname): - _verify_ip_address(ssl_conn, server_hostname) + pyopenssl.verify_ip_address(ssl_conn, server_hostname) else: - _verify_hostname(ssl_conn, server_hostname) - except (_SICertificateError, _SIVerificationError) as exc: + pyopenssl.verify_hostname(ssl_conn, server_hostname) + except ( # type:ignore[misc] + service_identity.CertificateError, + service_identity.VerificationError, + ) as exc: raise _CertificateError(str(exc)) from None return ssl_conn diff --git a/pymongo/read_concern.py b/pymongo/read_concern.py index 0b54ee86f7..eda715f7c0 100644 --- a/pymongo/read_concern.py +++ b/pymongo/read_concern.py @@ -21,8 +21,7 @@ class ReadConcern: """ReadConcern - :Parameters: - - `level`: (string) The read concern level specifies the level of + :param level: (string) The read concern level specifies the level of isolation for read operations. For example, a read operation using a read concern level of ``majority`` will only return data that has been written to a majority of nodes. If the level is left unspecified, the diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index 986cc772bf..7752750c46 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -301,15 +301,14 @@ class PrimaryPreferred(_ServerMode): created reads will be routed to an available secondary until the primary of the replica set is discovered. - :Parameters: - - `tag_sets`: The :attr:`~tag_sets` to use if the primary is not + :param tag_sets: The :attr:`~tag_sets` to use if the primary is not available. - - `max_staleness`: (integer, in seconds) The maximum estimated + :param max_staleness: (integer, in seconds) The maximum estimated length of time a replica set secondary can fall behind the primary in replication before it will no longer be selected for operations. Default -1, meaning no maximum.
If it is set, it must be at least 90 seconds. - - `hedge`: The :attr:`~hedge` to use if the primary is not available. + :param hedge: The :attr:`~hedge` to use if the primary is not available. .. versionchanged:: 3.11 Added ``hedge`` parameter. @@ -345,14 +344,13 @@ class Secondary(_ServerMode): * When connected to a replica set queries are distributed among secondaries. An error is raised if no secondaries are available. - :Parameters: - - `tag_sets`: The :attr:`~tag_sets` for this read preference. - - `max_staleness`: (integer, in seconds) The maximum estimated + :param tag_sets: The :attr:`~tag_sets` for this read preference. + :param max_staleness: (integer, in seconds) The maximum estimated length of time a replica set secondary can fall behind the primary in replication before it will no longer be selected for operations. Default -1, meaning no maximum. If it is set, it must be at least 90 seconds. - - `hedge`: The :attr:`~hedge` for this read preference. + :param hedge: The :attr:`~hedge` for this read preference. .. versionchanged:: 3.11 Added ``hedge`` parameter. @@ -389,14 +387,13 @@ class SecondaryPreferred(_ServerMode): created reads will be routed to the primary of the replica set until an available secondary is discovered. - :Parameters: - - `tag_sets`: The :attr:`~tag_sets` for this read preference. - - `max_staleness`: (integer, in seconds) The maximum estimated + :param tag_sets: The :attr:`~tag_sets` for this read preference. + :param max_staleness: (integer, in seconds) The maximum estimated length of time a replica set secondary can fall behind the primary in replication before it will no longer be selected for operations. Default -1, meaning no maximum. If it is set, it must be at least 90 seconds. - - `hedge`: The :attr:`~hedge` for this read preference. + :param hedge: The :attr:`~hedge` for this read preference. .. versionchanged:: 3.11 Added ``hedge`` parameter. 
@@ -434,14 +431,13 @@ class Nearest(_ServerMode): * When connected to a replica set queries are distributed among all members. - :Parameters: - - `tag_sets`: The :attr:`~tag_sets` for this read preference. - - `max_staleness`: (integer, in seconds) The maximum estimated + :param tag_sets: The :attr:`~tag_sets` for this read preference. + :param max_staleness: (integer, in seconds) The maximum estimated length of time a replica set secondary can fall behind the primary in replication before it will no longer be selected for operations. Default -1, meaning no maximum. If it is set, it must be at least 90 seconds. - - `hedge`: The :attr:`~hedge` for this read preference. + :param hedge: The :attr:`~hedge` for this read preference. .. versionchanged:: 3.11 Added ``hedge`` parameter. @@ -471,8 +467,7 @@ class _AggWritePref: primary read preference. * Otherwise use `pref` read preference. - :Parameters: - - `pref`: The read preference to use on MongoDB 5.0+. + :param pref: The read preference to use on MongoDB 5.0+. """ __slots__ = ("pref", "effective_pref") diff --git a/pymongo/response.py b/pymongo/response.py index 5ff6ca707e..5cdd3e7e8d 100644 --- a/pymongo/response.py +++ b/pymongo/response.py @@ -39,12 +39,11 @@ def __init__( ): """Represent a response from the server. - :Parameters: - - `data`: A network response message. - - `address`: (host, port) of the source server. - - `request_id`: The request id of this operation. - - `duration`: The duration of the operation. - - `from_command`: if the response is the result of a db command. + :param data: A network response message. + :param address: (host, port) of the source server. + :param request_id: The request id of this operation. + :param duration: The duration of the operation. + :param from_command: if the response is the result of a db command. """ self._data = data self._address = address @@ -100,15 +99,14 @@ def __init__( ): """Represent a response to an exhaust cursor's initial query. 
- :Parameters: - - `data`: A network response message. - - `address`: (host, port) of the source server. - - `conn`: The Connection used for the initial query. - - `request_id`: The request id of this operation. - - `duration`: The duration of the operation. - - `from_command`: If the response is the result of a db command. - - `docs`: List of documents. - - `more_to_come`: Bool indicating whether cursor is ready to be + :param data: A network response message. + :param address: (host, port) of the source server. + :param conn: The Connection used for the initial query. + :param request_id: The request id of this operation. + :param duration: The duration of the operation. + :param from_command: If the response is the result of a db command. + :param docs: List of documents. + :param more_to_come: Bool indicating whether cursor is ready to be exhausted. """ super().__init__(data, address, request_id, duration, from_command, docs) diff --git a/pymongo/results.py b/pymongo/results.py index 20c6023cd2..f57286569b 100644 --- a/pymongo/results.py +++ b/pymongo/results.py @@ -187,9 +187,8 @@ class BulkWriteResult(_WriteResult): def __init__(self, bulk_api_result: dict[str, Any], acknowledged: bool) -> None: """Create a BulkWriteResult instance. - :Parameters: - - `bulk_api_result`: A result dict from the bulk API - - `acknowledged`: Was this write result acknowledged? If ``False`` + :param bulk_api_result: A result dict from the bulk API + :param acknowledged: Was this write result acknowledged? If ``False`` then all properties of this object will raise :exc:`~pymongo.errors.InvalidOperation`. 
""" diff --git a/pymongo/saslprep.py b/pymongo/saslprep.py index 02c845079a..7fb546f61b 100644 --- a/pymongo/saslprep.py +++ b/pymongo/saslprep.py @@ -23,7 +23,8 @@ HAVE_STRINGPREP = False def saslprep( - data: Any, prohibit_unassigned_code_points: Optional[bool] = True # noqa: ARG001 + data: Any, + prohibit_unassigned_code_points: Optional[bool] = True, # noqa: ARG001 ) -> Any: """SASLprep dummy""" if isinstance(data, str): @@ -56,18 +57,16 @@ def saslprep( def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) -> Any: """An implementation of RFC4013 SASLprep. - :Parameters: - - `data`: The string to SASLprep. Unicode strings + :param data: The string to SASLprep. Unicode strings (:class:`str`) are supported. Byte strings (:class:`bytes`) are ignored. - - `prohibit_unassigned_code_points`: True / False. RFC 3454 + :param prohibit_unassigned_code_points: True / False. RFC 3454 and RFCs for various SASL mechanisms distinguish between `queries` (unassigned code points allowed) and `stored strings` (unassigned code points prohibited). Defaults to ``True`` (unassigned code points are prohibited). - :Returns: - The SASLprep'ed version of `data`. + :return: The SASLprep'ed version of `data`. 
""" prohibited: Any diff --git a/pymongo/server.py b/pymongo/server.py index f431fd0140..1c437a7eef 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -15,12 +15,14 @@ """Communicate with one MongoDB server in a topology.""" from __future__ import annotations +import logging from datetime import datetime from typing import TYPE_CHECKING, Any, Callable, ContextManager, Optional, Union from bson import _decode_all_selective from pymongo.errors import NotPrimaryError, OperationFailure from pymongo.helpers import _check_command_response, _handle_reauth +from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log from pymongo.message import _convert_exception, _GetMore, _OpMsg, _Query from pymongo.response import PinnedResponse, Response @@ -29,7 +31,7 @@ from weakref import ReferenceType from bson.objectid import ObjectId - from pymongo.mongo_client import _MongoClientErrorHandler + from pymongo.mongo_client import MongoClient, _MongoClientErrorHandler from pymongo.monitor import Monitor from pymongo.monitoring import _EventListeners from pymongo.pool import Connection, Pool @@ -88,7 +90,7 @@ def close(self) -> None: ) ) self._monitor.close() - self._pool.reset_without_pause() + self._pool.close() def request_check(self) -> None: """Check the server's state soon.""" @@ -102,6 +104,7 @@ def run_operation( read_preference: _ServerMode, listeners: Optional[_EventListeners], unpack_res: Callable[..., list[_DocumentOut]], + client: MongoClient, ) -> Response: """Run a _Query or _GetMore operation and return a Response object. @@ -109,18 +112,16 @@ def run_operation( cursors. Can raise ConnectionFailure, OperationFailure, etc. - :Parameters: - - `conn`: A Connection instance. - - `operation`: A _Query or _GetMore object. - - `read_preference`: The read preference to use. - - `listeners`: Instance of _EventListeners or None. - - `unpack_res`: A callable that decodes the wire protocol response. + :param conn: A Connection instance. 
+ :param operation: A _Query or _GetMore object. + :param read_preference: The read preference to use. + :param listeners: Instance of _EventListeners or None. + :param unpack_res: A callable that decodes the wire protocol response. """ duration = None assert listeners is not None publish = listeners.enabled_for_commands - if publish: - start = datetime.now() + start = datetime.now() use_cmd = operation.use_command(conn) more_to_come = operation.conn_mgr and operation.conn_mgr.more_to_come @@ -130,15 +131,37 @@ def run_operation( message = operation.get_message(read_preference, conn, use_cmd) request_id, data, max_doc_size = self._split_message(message) + cmd, dbn = operation.as_command(conn) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.STARTED, + command=cmd, + commandName=next(iter(cmd)), + databaseName=dbn, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + ) + if publish: cmd, dbn = operation.as_command(conn) if "$db" not in cmd: cmd["$db"] = dbn assert listeners is not None listeners.publish_command_start( - cmd, dbn, request_id, conn.address, service_id=conn.service_id + cmd, + dbn, + request_id, + conn.address, + conn.server_connection_id, + service_id=conn.service_id, ) - start = datetime.now() try: if more_to_come: @@ -166,12 +189,30 @@ def run_operation( operation.client._process_response(first, operation.session) _check_command_response(first, conn.max_wire_version) except Exception as exc: + duration = datetime.now() - start + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + 
_COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.FAILED, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=dbn, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) if publish: - duration = datetime.now() - start - if isinstance(exc, (NotPrimaryError, OperationFailure)): - failure: _DocumentOut = exc.details # type: ignore[assignment] - else: - failure = _convert_exception(exc) assert listeners is not None listeners.publish_command_failure( duration, @@ -179,25 +220,42 @@ def run_operation( operation.name, request_id, conn.address, + conn.server_connection_id, service_id=conn.service_id, database_name=dbn, ) raise - - if publish: - duration = datetime.now() - start - # Must publish in find / getMore / explain command response - # format. - if use_cmd: - res: _DocumentOut = docs[0] - elif operation.name == "explain": - res = docs[0] if docs else {} + duration = datetime.now() - start + # Must publish in find / getMore / explain command response + # format. 
+ if use_cmd: + res = docs[0] + elif operation.name == "explain": + res = docs[0] if docs else {} + else: + res = {"cursor": {"id": reply.cursor_id, "ns": operation.namespace()}, "ok": 1} # type: ignore[union-attr] + if operation.name == "find": + res["cursor"]["firstBatch"] = docs else: - res = {"cursor": {"id": reply.cursor_id, "ns": operation.namespace()}, "ok": 1} # type: ignore[union-attr] - if operation.name == "find": - res["cursor"]["firstBatch"] = docs - else: - res["cursor"]["nextBatch"] = docs + res["cursor"]["nextBatch"] = docs + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.SUCCEEDED, + durationMS=duration, + reply=res, + commandName=next(iter(cmd)), + databaseName=dbn, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + ) + if publish: assert listeners is not None listeners.publish_command_success( duration, @@ -205,6 +263,7 @@ def run_operation( operation.name, request_id, conn.address, + conn.server_connection_id, service_id=conn.service_id, database_name=dbn, ) @@ -274,8 +333,7 @@ def _split_message( ) -> tuple[int, Any, int]: """Return request_id, data, max_doc_size. - :Parameters: - - `message`: (request_id, data, max_doc_size) or (request_id, data) + :param message: (request_id, data, max_doc_size) or (request_id, data) """ if len(message) == 3: return message # type: ignore[return-value] diff --git a/pymongo/server_api.py b/pymongo/server_api.py index 90505bc5ae..4a746008c4 100644 --- a/pymongo/server_api.py +++ b/pymongo/server_api.py @@ -106,12 +106,11 @@ def __init__( ): """Options to configure MongoDB Stable API. - :Parameters: - - `version`: The API version string. Must be one of the values in + :param version: The API version string. 
Must be one of the values in :class:`ServerApiVersion`. - - `strict` (optional): Set to ``True`` to enable API strict mode. + :param strict: Set to ``True`` to enable API strict mode. Defaults to ``None`` which means "use the server's default". - - `deprecation_errors` (optional): Set to ``True`` to enable + :param deprecation_errors: Set to ``True`` to enable deprecation errors. Defaults to ``None`` which means "use the server's default". @@ -162,9 +161,8 @@ def deprecation_errors(self) -> Optional[bool]: def _add_to_command(cmd: MutableMapping[str, Any], server_api: Optional[ServerApi]) -> None: """Internal helper which adds API versioning options to a command. - :Parameters: - - `cmd`: The command. - - `server_api` (optional): A :class:`ServerApi` or ``None``. + :param cmd: The command. + :param server_api: A :class:`ServerApi` or ``None``. """ if not server_api: return diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 3b4131f327..6393fce0a1 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -29,12 +29,11 @@ class ServerDescription: """Immutable representation of one server. 
- :Parameters: - - `address`: A (host, port) pair - - `hello`: Optional Hello instance - - `round_trip_time`: Optional float - - `error`: Optional, the last error attempting to connect to the server - - `round_trip_time`: Optional float, the min latency from the most recent samples + :param address: A (host, port) pair + :param hello: Optional Hello instance + :param round_trip_time: Optional float + :param error: Optional, the last error attempting to connect to the server + :param round_trip_time: Optional float, the min latency from the most recent samples """ __slots__ = ( @@ -244,8 +243,7 @@ def is_server_type_known(self) -> bool: def retryable_writes_supported(self) -> bool: """Checks if this server supports retryable writes.""" return ( - self._ls_timeout_minutes is not None - and self._server_type in (SERVER_TYPE.Mongos, SERVER_TYPE.RSPrimary) + self._server_type in (SERVER_TYPE.Mongos, SERVER_TYPE.RSPrimary) ) or self._server_type == SERVER_TYPE.LoadBalancer @property diff --git a/pymongo/srv_resolver.py b/pymongo/srv_resolver.py index 76c8b5161c..6f6cc285fa 100644 --- a/pymongo/srv_resolver.py +++ b/pymongo/srv_resolver.py @@ -17,17 +17,22 @@ import ipaddress import random -from typing import Any, Optional, Union +from typing import TYPE_CHECKING, Any, Optional, Union -try: +from pymongo.common import CONNECT_TIMEOUT +from pymongo.errors import ConfigurationError + +if TYPE_CHECKING: from dns import resolver - _HAVE_DNSPYTHON = True -except ImportError: - _HAVE_DNSPYTHON = False -from pymongo.common import CONNECT_TIMEOUT -from pymongo.errors import ConfigurationError +def _have_dnspython() -> bool: + try: + import dns # noqa: F401 + + return True + except ImportError: + return False # dnspython can return bytes or str from various parts @@ -40,6 +45,8 @@ def maybe_decode(text: Union[str, bytes]) -> str: # PYTHON-2667 Lazily call dns.resolver methods for compatibility with eventlet. 
def _resolve(*args: Any, **kwargs: Any) -> resolver.Answer: + from dns import resolver + if hasattr(resolver, "resolve"): # dnspython >= 2 return resolver.resolve(*args, **kwargs) @@ -81,6 +88,8 @@ def __init__( raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) def get_options(self) -> Optional[str]: + from dns import resolver + try: results = _resolve(self.__fqdn, "TXT", lifetime=self.__connect_timeout) except (resolver.NoAnswer, resolver.NXDOMAIN): diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index 3c9ee01ef1..6a5dd278d3 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -15,6 +15,7 @@ """Support for SSL in PyMongo.""" from __future__ import annotations +import warnings from typing import Optional from pymongo.errors import ConfigurationError @@ -23,7 +24,17 @@ try: import pymongo.pyopenssl_context as _ssl -except ImportError: +except (ImportError, AttributeError) as exc: + if isinstance(exc, AttributeError): + warnings.warn( + "Failed to use the installed version of PyOpenSSL. " + "Falling back to stdlib ssl, disabling OCSP support. " + "This is likely caused by incompatible versions " + "of PyOpenSSL < 23.2.0 and cryptography >= 42.0.0. 
" + "Try updating PyOpenSSL >= 23.2.0 to enable OCSP.", + UserWarning, + stacklevel=2, + ) try: import pymongo.ssl_context as _ssl # type: ignore[no-redef] except ImportError: @@ -97,7 +108,7 @@ class SSLError(Exception): # type: ignore HAS_SNI = False IPADDR_SAFE = False - BLOCKING_IO_ERRORS = () # type: ignore + BLOCKING_IO_ERRORS = () # type:ignore[assignment] def get_ssl_context(*dummy): # type: ignore """No ssl module, raise ConfigurationError.""" diff --git a/pymongo/topology.py b/pymongo/topology.py index 786be3ec93..ea623cd1b4 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -16,18 +16,20 @@ from __future__ import annotations +import logging import os import queue import random +import sys import time import warnings import weakref +from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Mapping, Optional, cast from pymongo import _csot, common, helpers, periodic_executor from pymongo.client_session import _ServerSession, _ServerSessionPool from pymongo.errors import ( - ConfigurationError, ConnectionFailure, InvalidOperation, NetworkTimeout, @@ -39,6 +41,11 @@ ) from pymongo.hello import Hello from pymongo.lock import _create_lock +from pymongo.logger import ( + _SERVER_SELECTION_LOGGER, + _debug_log, + _ServerSelectionStatusMessage, +) from pymongo.monitor import SrvMonitor from pymongo.pool import Pool, PoolOptions from pymongo.server import Server @@ -47,7 +54,6 @@ Selection, any_server_selector, arbiter_server_selector, - readable_server_selector, secondary_server_selector, writable_server_selector, ) @@ -65,6 +71,9 @@ from pymongo.typings import ClusterTime, _Address +_pymongo_dir = str(Path(__file__).parent) + + def process_events_queue(queue_ref: weakref.ReferenceType[queue.Queue]) -> bool: q = queue_ref() if not q: @@ -182,12 +191,17 @@ def open(self) -> None: self._pid = pid elif pid != self._pid: self._pid = pid - warnings.warn( + if sys.version_info[:2] >= (3, 12): + kwargs = {"skip_file_prefixes": (_pymongo_dir,)} 
+ else: + kwargs = {"stacklevel": 6} + # Ignore B028 warning for missing stacklevel. + warnings.warn( # type: ignore[call-overload] # noqa: B028 "MongoClient opened before fork. May not be entirely fork-safe, " "proceed with caution. See PyMongo's documentation for details: " "https://pymongo.readthedocs.io/en/stable/faq.html#" "is-pymongo-fork-safe", - stacklevel=2, + **kwargs, ) with self._lock: # Close servers and clear the pools. @@ -210,18 +224,20 @@ def get_server_selection_timeout(self) -> float: def select_servers( self, selector: Callable[[Selection], Selection], + operation: str, server_selection_timeout: Optional[float] = None, address: Optional[_Address] = None, + operation_id: Optional[int] = None, ) -> list[Server]: """Return a list of Servers matching selector, or time out. - :Parameters: - - `selector`: function that takes a list of Servers and returns + :param selector: function that takes a list of Servers and returns a subset of them. - - `server_selection_timeout` (optional): maximum seconds to wait. + :param operation: The name of the operation that the server is being selected for. + :param server_selection_timeout: maximum seconds to wait. If not provided, the default value common.SERVER_SELECTION_TIMEOUT is used. - - `address`: optional server address to select. + :param address: optional server address to select. Calls self.open() if needed. 
@@ -234,7 +250,9 @@ def select_servers( server_timeout = server_selection_timeout with self._lock: - server_descriptions = self._select_servers_loop(selector, server_timeout, address) + server_descriptions = self._select_servers_loop( + selector, server_timeout, operation, operation_id, address + ) return [ cast(Server, self.get_server_by_address(sd.address)) for sd in server_descriptions @@ -244,11 +262,26 @@ def _select_servers_loop( self, selector: Callable[[Selection], Selection], timeout: float, + operation: str, + operation_id: Optional[int], address: Optional[_Address], ) -> list[ServerDescription]: """select_servers() guts. Hold the lock when calling this.""" now = time.monotonic() end_time = now + timeout + logged_waiting = False + + if _SERVER_SELECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SERVER_SELECTION_LOGGER, + message=_ServerSelectionStatusMessage.STARTED, + selector=selector, + operation=operation, + operationId=operation_id, + topologyDescription=self.description, + clientId=self.description._topology_settings._topology_id, + ) + server_descriptions = self._description.apply_selector( selector, address, custom_selector=self._settings.server_selector ) @@ -256,10 +289,34 @@ def _select_servers_loop( while not server_descriptions: # No suitable servers. 
if timeout == 0 or now > end_time: + if _SERVER_SELECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SERVER_SELECTION_LOGGER, + message=_ServerSelectionStatusMessage.FAILED, + selector=selector, + operation=operation, + operationId=operation_id, + topologyDescription=self.description, + clientId=self.description._topology_settings._topology_id, + failure=self._error_message(selector), + ) raise ServerSelectionTimeoutError( f"{self._error_message(selector)}, Timeout: {timeout}s, Topology Description: {self.description!r}" ) + if not logged_waiting: + _debug_log( + _SERVER_SELECTION_LOGGER, + message=_ServerSelectionStatusMessage.WAITING, + selector=selector, + operation=operation, + operationId=operation_id, + topologyDescription=self.description, + clientId=self.description._topology_settings._topology_id, + remainingTimeMS=int(end_time - time.monotonic()), + ) + logged_waiting = True + self._ensure_opened() self._request_check_all() @@ -280,10 +337,16 @@ def _select_servers_loop( def _select_server( self, selector: Callable[[Selection], Selection], + operation: str, server_selection_timeout: Optional[float] = None, address: Optional[_Address] = None, + deprioritized_servers: Optional[list[Server]] = None, + operation_id: Optional[int] = None, ) -> Server: - servers = self.select_servers(selector, server_selection_timeout, address) + servers = self.select_servers( + selector, operation, server_selection_timeout, address, operation_id + ) + servers = _filter_servers(servers, deprioritized_servers) if len(servers) == 1: return servers[0] server1, server2 = random.sample(servers, 2) @@ -295,17 +358,43 @@ def _select_server( def select_server( self, selector: Callable[[Selection], Selection], + operation: str, server_selection_timeout: Optional[float] = None, address: Optional[_Address] = None, + deprioritized_servers: Optional[list[Server]] = None, + operation_id: Optional[int] = None, ) -> Server: """Like select_servers, but choose a random server if 
several match.""" - server = self._select_server(selector, server_selection_timeout, address) + server = self._select_server( + selector, + operation, + server_selection_timeout, + address, + deprioritized_servers, + operation_id=operation_id, + ) if _csot.get_timeout(): _csot.set_rtt(server.description.min_round_trip_time) + if _SERVER_SELECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SERVER_SELECTION_LOGGER, + message=_ServerSelectionStatusMessage.SUCCEEDED, + selector=selector, + operation=operation, + operationId=operation_id, + topologyDescription=self.description, + clientId=self.description._topology_settings._topology_id, + serverHost=server.description.address[0], + serverPort=server.description.address[1], + ) return server def select_server_by_address( - self, address: _Address, server_selection_timeout: Optional[int] = None + self, + address: _Address, + operation: str, + server_selection_timeout: Optional[int] = None, + operation_id: Optional[int] = None, ) -> Server: """Return a Server for "address", reconnecting if necessary. @@ -313,21 +402,31 @@ def select_server_by_address( servers. Time out after "server_selection_timeout" if the server cannot be reached. - :Parameters: - - `address`: A (host, port) pair. - - `server_selection_timeout` (optional): maximum seconds to wait. + :param address: A (host, port) pair. + :param operation: The name of the operation that the server is being selected for. + :param server_selection_timeout: maximum seconds to wait. If not provided, the default value common.SERVER_SELECTION_TIMEOUT is used. + :param operation_id: The unique id of the current operation being performed. Defaults to None if not provided. Calls self.open() if needed. Raises exc:`ServerSelectionTimeoutError` after `server_selection_timeout` if no matching servers are found. 
""" - return self.select_server(any_server_selector, server_selection_timeout, address) + return self.select_server( + any_server_selector, + operation, + server_selection_timeout, + address, + operation_id=operation_id, + ) def _process_change( - self, server_description: ServerDescription, reset_pool: bool = False + self, + server_description: ServerDescription, + reset_pool: bool = False, + interrupt_connections: bool = False, ) -> None: """Process a new ServerDescription on an opened topology. @@ -384,12 +483,17 @@ def _process_change( if reset_pool: server = self._servers.get(server_description.address) if server: - server.pool.reset() + server.pool.reset(interrupt_connections=interrupt_connections) # Wake waiters in select_servers(). self._condition.notify_all() - def on_change(self, server_description: ServerDescription, reset_pool: bool = False) -> None: + def on_change( + self, + server_description: ServerDescription, + reset_pool: bool = False, + interrupt_connections: bool = False, + ) -> None: """Process a new ServerDescription after an hello call completes.""" # We do no I/O holding the lock. with self._lock: @@ -402,7 +506,7 @@ def on_change(self, server_description: ServerDescription, reset_pool: bool = Fa # change removed it. E.g., we got a host list from the primary # that didn't include this server. if self._opened and self._description.has_server(server_description.address): - self._process_change(server_description, reset_pool) + self._process_change(server_description, reset_pool, interrupt_connections) def _process_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: """Process a new seedlist on an opened topology. 
@@ -565,51 +669,14 @@ def description(self) -> TopologyDescription: def pop_all_sessions(self) -> list[_ServerSession]: """Pop all session ids from the pool.""" - with self._lock: - return self._session_pool.pop_all() + return self._session_pool.pop_all() - def _check_implicit_session_support(self) -> None: - with self._lock: - self._check_session_support() - - def _check_session_support(self) -> float: - """Internal check for session support on clusters.""" - if self._settings.load_balanced: - # Sessions never time out in load balanced mode. - return float("inf") - session_timeout = self._description.logical_session_timeout_minutes - if session_timeout is None: - # Maybe we need an initial scan? Can raise ServerSelectionError. - if self._description.topology_type == TOPOLOGY_TYPE.Single: - if not self._description.has_known_servers: - self._select_servers_loop( - any_server_selector, self.get_server_selection_timeout(), None - ) - elif not self._description.readable_servers: - self._select_servers_loop( - readable_server_selector, self.get_server_selection_timeout(), None - ) - - session_timeout = self._description.logical_session_timeout_minutes - if session_timeout is None: - raise ConfigurationError("Sessions are not supported by this MongoDB deployment") - return session_timeout - - def get_server_session(self) -> _ServerSession: + def get_server_session(self, session_timeout_minutes: Optional[int]) -> _ServerSession: """Start or resume a server session, or raise ConfigurationError.""" - with self._lock: - session_timeout = self._check_session_support() - return self._session_pool.get_server_session(session_timeout) + return self._session_pool.get_server_session(session_timeout_minutes) - def return_server_session(self, server_session: _ServerSession, lock: bool) -> None: - if lock: - with self._lock: - self._session_pool.return_server_session( - server_session, self._description.logical_session_timeout_minutes - ) - else: - # Called from a __del__ method, 
can't use a lock. - self._session_pool.return_server_session_no_lock(server_session) + def return_server_session(self, server_session: _ServerSession) -> None: + self._session_pool.return_server_session(server_session) def _new_selection(self) -> Selection: """A Selection object, initially including all known servers. @@ -794,7 +861,9 @@ def _update_servers(self) -> None: self._servers.pop(address) def _create_pool_for_server(self, address: _Address) -> Pool: - return self._settings.pool_class(address, self._settings.pool_options) + return self._settings.pool_class( + address, self._settings.pool_options, client_id=self._topology_id + ) def _create_pool_for_monitor(self, address: _Address) -> Pool: options = self._settings.pool_options @@ -814,7 +883,9 @@ def _create_pool_for_monitor(self, address: _Address) -> Pool: server_api=options.server_api, ) - return self._settings.pool_class(address, monitor_pool_options, handshake=False) + return self._settings.pool_class( + address, monitor_pool_options, handshake=False, client_id=self._topology_id + ) def _error_message(self, selector: Callable[[Selection], Selection]) -> str: """Format an error message if server selection fails. 
@@ -931,3 +1002,16 @@ def _is_stale_server_description(current_sd: ServerDescription, new_sd: ServerDe if current_tv["processId"] != new_tv["processId"]: return False return current_tv["counter"] > new_tv["counter"] + + +def _filter_servers( + candidates: list[Server], deprioritized_servers: Optional[list[Server]] = None +) -> list[Server]: + """Filter out deprioritized servers from a list of server candidates.""" + if not deprioritized_servers: + return candidates + + filtered = [server for server in candidates if server not in deprioritized_servers] + + # If not possible to pick a prioritized server, return the original list + return filtered or candidates diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index 141f74edf3..cc2330cbab 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -69,14 +69,13 @@ def __init__( ) -> None: """Representation of a deployment of MongoDB servers. - :Parameters: - - `topology_type`: initial type - - `server_descriptions`: dict of (address, ServerDescription) for + :param topology_type: initial type + :param server_descriptions: dict of (address, ServerDescription) for all seeds - - `replica_set_name`: replica set name or None - - `max_set_version`: greatest setVersion seen from a primary, or None - - `max_election_id`: greatest electionId seen from a primary, or None - - `topology_settings`: a TopologySettings + :param replica_set_name: replica set name or None + :param max_set_version: greatest setVersion seen from a primary, or None + :param max_election_id: greatest electionId seen from a primary, or None + :param topology_settings: a TopologySettings """ self._topology_type = topology_type self._replica_set_name = replica_set_name @@ -266,8 +265,14 @@ def srv_max_hosts(self) -> int: def _apply_local_threshold(self, selection: Optional[Selection]) -> list[ServerDescription]: if not selection: return [] + round_trip_times: list[float] = [] + for server in 
selection.server_descriptions: + if server.round_trip_time is None: + config_err_msg = f"round_trip_time for server {server.address} is unexpectedly None: {self}, servers: {selection.server_descriptions}" + raise ConfigurationError(config_err_msg) + round_trip_times.append(server.round_trip_time) # Round trip time in seconds. - fastest = min(cast(float, s.round_trip_time) for s in selection.server_descriptions) + fastest = min(round_trip_times) threshold = self._topology_settings.local_threshold_ms / 1000.0 return [ s @@ -283,12 +288,11 @@ def apply_selector( ) -> list[ServerDescription]: """List of servers matching the provided selector(s). - :Parameters: - - `selector`: a callable that takes a Selection as input and returns + :param selector: a callable that takes a Selection as input and returns a Selection as output. For example, an instance of a read preference from :mod:`~pymongo.read_preferences`. - - `address` (optional): A server address to select. - - `custom_selector` (optional): A callable that augments server + :param address: A server address to select. + :param custom_selector: A callable that augments server selection rules. Accepts a list of :class:`~pymongo.server_description.ServerDescription` objects and return a list of server descriptions that should be considered @@ -333,8 +337,7 @@ def has_readable_server(self, read_preference: _ServerMode = ReadPreference.PRIM """Does this topology have any readable servers available matching the given read preference? - :Parameters: - - `read_preference`: an instance of a read preference from + :param read_preference: an instance of a read preference from :mod:`~pymongo.read_preferences`. Defaults to :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. @@ -384,9 +387,8 @@ def updated_topology_description( ) -> TopologyDescription: """Return an updated copy of a TopologyDescription. 
- :Parameters: - - `topology_description`: the current TopologyDescription - - `server_description`: a new ServerDescription that resulted from + :param topology_description: the current TopologyDescription + :param server_description: a new ServerDescription that resulted from a hello call Called after attempting (successfully or not) to call hello on the @@ -489,9 +491,8 @@ def _updated_topology_description_srv_polling( ) -> TopologyDescription: """Return an updated copy of a TopologyDescription. - :Parameters: - - `topology_description`: the current TopologyDescription - - `seedlist`: a list of new seeds new ServerDescription that resulted from + :param topology_description: the current TopologyDescription + :param seedlist: a list of new seeds new ServerDescription that resulted from a hello call """ assert topology_description.topology_type in SRV_POLLING_TOPOLOGIES @@ -586,7 +587,6 @@ def _update_rs_from_primary( server.server_type is SERVER_TYPE.RSPrimary and server.address != server_description.address ): - # Reset old primary's type to Unknown. sds[server.address] = server.to_unknown() diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index d5292c1b54..4ebd3008c3 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -40,7 +40,7 @@ get_validated_options, ) from pymongo.errors import ConfigurationError, InvalidURI -from pymongo.srv_resolver import _HAVE_DNSPYTHON, _SrvResolver +from pymongo.srv_resolver import _have_dnspython, _SrvResolver from pymongo.typings import _Address if TYPE_CHECKING: @@ -56,8 +56,7 @@ def _unquoted_percent(s: str) -> bool: """Check for unescaped percent signs. - :Parameters: - - `s`: A string. `s` can have things like '%25', '%2525', + :param s: A string. `s` can have things like '%25', '%2525', and '%E2%85%A8' but cannot have unquoted percent like '%foo'. 
""" for i in range(len(s)): @@ -78,8 +77,7 @@ def parse_userinfo(userinfo: str) -> tuple[str, str]: Returns a 2-tuple containing the unescaped username followed by the unescaped password. - :Parameters: - - `userinfo`: A string of the form : + :param userinfo: A string of the form : """ if "@" in userinfo or userinfo.count(":") > 1 or _unquoted_percent(userinfo): raise InvalidURI( @@ -103,10 +101,9 @@ def parse_ipv6_literal_host( Returns a 2-tuple of IPv6 literal followed by port where port is default_port if it wasn't specified in entity. - :Parameters: - - `entity`: A string that represents an IPv6 literal enclosed + :param entity: A string that represents an IPv6 literal enclosed in braces (e.g. '[::1]' or '[::1]:27017'). - - `default_port`: The port number to use when one wasn't + :param default_port: The port number to use when one wasn't specified in entity. """ if entity.find("]") == -1: @@ -125,10 +122,9 @@ def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> _Addr Returns a 2-tuple of host followed by port where port is default_port if it wasn't specified in the string. - :Parameters: - - `entity`: A host or host:port string where host could be a + :param entity: A host or host:port string where host could be a hostname or IP address. - - `default_port`: The port number to use when one wasn't + :param default_port: The port number to use when one wasn't specified in entity. """ host = entity @@ -192,8 +188,7 @@ def _handle_security_options(options: _CaseInsensitiveDictionary) -> _CaseInsens """Raise appropriate errors when conflicting TLS options are present in the options dictionary. - :Parameters: - - `options`: Instance of _CaseInsensitiveDictionary containing + :param options: Instance of _CaseInsensitiveDictionary containing MongoDB URI options. """ # Implicitly defined options must not be explicitly specified. @@ -247,8 +242,7 @@ def _handle_option_deprecations(options: _CaseInsensitiveDictionary) -> _CaseIns options dictionary. 
Removes deprecated option key, value pairs if the options dictionary is found to also have the renamed option. - :Parameters: - - `options`: Instance of _CaseInsensitiveDictionary containing + :param options: Instance of _CaseInsensitiveDictionary containing MongoDB URI options. """ for optname in list(options): @@ -286,8 +280,7 @@ def _normalize_options(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveD """Normalizes option names in the options dictionary by converting them to their internally-used names. - :Parameters: - - `options`: Instance of _CaseInsensitiveDictionary containing + :param options: Instance of _CaseInsensitiveDictionary containing MongoDB URI options. """ # Expand the tlsInsecure option. @@ -312,9 +305,8 @@ def validate_options(opts: Mapping[str, Any], warn: bool = False) -> MutableMapp False then errors will be thrown for invalid options, otherwise they will be ignored and a warning will be issued. - :Parameters: - - `opts`: A dict of MongoDB URI options. - - `warn` (optional): If ``True`` then warnings will be logged and + :param opts: A dict of MongoDB URI options. + :param warn: If ``True`` then warnings will be logged and invalid options will be ignored. Otherwise invalid options will cause errors. """ @@ -327,13 +319,12 @@ def split_options( """Takes the options portion of a MongoDB URI, validates each option and returns the options in a dictionary. - :Parameters: - - `opt`: A string representing MongoDB URI options. - - `validate`: If ``True`` (the default), validate and normalize all + :param opt: A string representing MongoDB URI options. + :param validate: If ``True`` (the default), validate and normalize all options. - - `warn`: If ``False`` (the default), suppress all warnings raised + :param warn: If ``False`` (the default), suppress all warnings raised during validation of options. 
- - `normalize`: If ``True`` (the default), renames all options to their + :param normalize: If ``True`` (the default), renames all options to their internally-used names. """ and_idx = opts.find("&") @@ -375,9 +366,8 @@ def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> list[ Returns a set of 2-tuples containing the host name (or IP) followed by port number. - :Parameters: - - `hosts`: A string of the form host1[:port],host2[:port],... - - `default_port`: The port number to use when one wasn't specified + :param hosts: A string of the form host1[:port],host2[:port],... + :param default_port: The port number to use when one wasn't specified for a host. """ nodes = [] @@ -442,21 +432,20 @@ def parse_uri( If the URI scheme is "mongodb+srv://" DNS SRV and TXT lookups will be done to build nodelist and options. - :Parameters: - - `uri`: The MongoDB URI to parse. - - `default_port`: The port number to use when one wasn't specified + :param uri: The MongoDB URI to parse. + :param default_port: The port number to use when one wasn't specified for a host in the URI. - - `validate` (optional): If ``True`` (the default), validate and + :param validate: If ``True`` (the default), validate and normalize all options. Default: ``True``. - - `warn` (optional): When validating, if ``True`` then will warn + :param warn: When validating, if ``True`` then will warn the user then ignore any invalid options or values. If ``False``, validation will error when options are unsupported or values are invalid. Default: ``False``. - - `normalize` (optional): If ``True``, convert names of URI options + :param normalize: If ``True``, convert names of URI options to their internally-used names. Default: ``True``. - - `connect_timeout` (optional): The maximum time in milliseconds to + :param connect_timeout: The maximum time in milliseconds to wait for a response from the DNS server. 
- - `srv_service_name` (optional): A custom SRV service name + :param srv_service_name: A custom SRV service name .. versionchanged:: 4.6 The delimiting slash (``/``) between hosts and connection options is now optional. @@ -483,7 +472,7 @@ def parse_uri( is_srv = False scheme_free = uri[SCHEME_LEN:] elif uri.startswith(SRV_SCHEME): - if not _HAVE_DNSPYTHON: + if not _have_dnspython(): python_path = sys.executable or "python" raise ConfigurationError( 'The "dnspython" module must be ' @@ -505,16 +494,11 @@ def parse_uri( collection = None options = _CaseInsensitiveDictionary() - host_part, _, path_part = scheme_free.partition("/") - if not host_part: - host_part = path_part - path_part = "" - - if path_part: - dbase, _, opts = path_part.partition("?") + host_plus_db_part, _, opts = scheme_free.partition("?") + if "/" in host_plus_db_part: + host_part, _, dbase = host_plus_db_part.partition("/") else: - # There was no slash in scheme_free, check for a sole "?". - host_part, _, opts = host_part.partition("?") + host_part = host_plus_db_part if dbase: dbase = unquote_plus(dbase) diff --git a/pymongo/write_concern.py b/pymongo/write_concern.py index ab6629fbbc..591a126f8f 100644 --- a/pymongo/write_concern.py +++ b/pymongo/write_concern.py @@ -20,7 +20,7 @@ from pymongo.errors import ConfigurationError -# Moved here to avoid a circular import. +# Duplicated here to avoid a circular import. def validate_boolean(option: str, value: Any) -> bool: """Validates that 'value' is True or False.""" if isinstance(value, bool): @@ -31,27 +31,30 @@ def validate_boolean(option: str, value: Any) -> bool: class WriteConcern: """WriteConcern - :Parameters: - - `w`: (integer or string) Used with replication, write operations + :param w: (integer or string) Used with replication, write operations will block until they have been replicated to the specified number or tagged set of servers. `w=` always includes the replica set primary (e.g. 
w=3 means write to the primary and wait until replicated to **two** secondaries). **w=0 disables acknowledgement of write operations and can not be used with other write concern options.** - - `wtimeout`: (integer) Used in conjunction with `w`. Specify a value - in milliseconds to control how long to wait for write propagation - to complete. If replication does not complete in the given + :param wtimeout: (integer) **DEPRECATED** Used in conjunction with `w`. + Specify a value in milliseconds to control how long to wait for write + propagation to complete. If replication does not complete in the given timeframe, a timeout exception is raised. - - `j`: If ``True`` block until write operations have been committed + :param j: If ``True`` block until write operations have been committed to the journal. Cannot be used in combination with `fsync`. Write operations will fail with an exception if this option is used when the server is running without journaling. - - `fsync`: If ``True`` and the server is running without journaling, + :param fsync: If ``True`` and the server is running without journaling, blocks until the server has synced all data files to disk. If the server is running with journaling, this acts the same as the `j` option, blocking until write operations have been committed to the journal. Cannot be used in combination with `j`. + + + .. versionchanged:: 4.7 + Deprecated parameter ``wtimeout``, use :meth:`~pymongo.timeout`. 
""" __slots__ = ("__document", "__acknowledged", "__server_default") diff --git a/pyproject.toml b/pyproject.toml index 78925e6024..d208f6a439 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,14 +1,14 @@ [build-system] -requires = ["setuptools>=63.0"] -build-backend = "setuptools.build_meta" +requires = ["hatchling>1.24","setuptools>=65.0","hatch-requirements-txt>=0.4.1"] +build-backend = "hatchling.build" [project] name = "pymongo" -dynamic = ["version"] +dynamic = ["version", "dependencies", "optional-dependencies"] description = "Python driver for MongoDB " -readme = "README.rst" +readme = "README.md" license = {file="LICENSE"} -requires-python = ">=3.7" +requires-python = ">=3.8" authors = [ { name = "The MongoDB Python Team" }, ] @@ -30,7 +30,6 @@ classifiers = [ "Programming Language :: Python :: Implementation :: PyPy", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", @@ -39,66 +38,84 @@ classifiers = [ "Topic :: Database", "Typing :: Typed", ] -dependencies = [ - "dnspython>=1.16.0,<3.0.0", -] - -[project.optional-dependencies] -aws = [ - "pymongo-auth-aws<2.0.0", -] -encryption = [ - "pymongo[aws]", - "pymongocrypt>=1.6.0,<2.0.0", - "certifi;os.name=='nt' or sys_platform=='darwin'", -] -gssapi = [ - "pykerberos;os.name!='nt'", - "winkerberos>=0.5.0;os.name=='nt'" -] -# PyOpenSSL 17.0.0 introduced support for OCSP. 17.1.0 introduced -# a related feature we need. 17.2.0 fixes a bug -# in set_default_verify_paths we should really avoid. -# service_identity 18.1.0 introduced support for IP addr matching. -# Fallback to certifi on Windows if we can't load CA certs from the system -# store and just use certifi on macOS. 
-# https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths -ocsp = [ - "certifi;os.name=='nt' or sys_platform=='darwin'", - "pyopenssl>=17.2.0", - "requests<3.0.0", - "cryptography>=2.5", - "service_identity>=18.1.0", -] -snappy = [ - "python-snappy", -] -# PYTHON-3423 Removed in 4.3 but kept here to avoid pip warnings. -srv = [] -tls = [] -# PYTHON-2133 Removed in 4.0 but kept here to avoid pip warnings. -zstd = [ - "zstandard", -] -test = ["pytest>=7"] [project.urls] -Homepage = "http://github.com/mongodb/mongo-python-driver" +Homepage = "https://www.mongodb.org" +Documentation = "https://pymongo.readthedocs.io" +Source = "https://github.com/mongodb/mongo-python-driver" +Tracker = "https://jira.mongodb.org/projects/PYTHON/issues" + +# Used to call hatch_build.py +[tool.hatch.build.hooks.custom] -[tool.setuptools.dynamic] -version = {attr = "pymongo._version.__version__"} +[tool.hatch.version] +path = "pymongo/_version.py" +validate-bump = false -[tool.setuptools.packages.find] -include = ["bson","gridfs", "pymongo"] +[tool.hatch.build.targets.wheel] +packages = ["bson","gridfs", "pymongo"] -[tool.setuptools.package-data] -bson=["py.typed", "*.pyi"] -pymongo=["py.typed", "*.pyi"] -gridfs=["py.typed", "*.pyi"] +[tool.hatch.metadata.hooks.requirements_txt] +files = ["requirements.txt"] + +[tool.hatch.metadata.hooks.requirements_txt.optional-dependencies] +aws = ["requirements/aws.txt"] +docs = ["requirements/docs.txt"] +encryption = ["requirements/encryption.txt"] +gssapi = ["requirements/gssapi.txt"] +ocsp = ["requirements/ocsp.txt"] +snappy = ["requirements/snappy.txt"] +test = ["requirements/test.txt"] +zstd = ["requirements/zstd.txt"] + +[tool.pytest.ini_options] +minversion = "7" +addopts = ["-ra", "--strict-config", "--strict-markers", "--junitxml=xunit-results/TEST-results.xml"] +testpaths = ["test"] +log_cli_level = "INFO" +norecursedirs = ["test/*"] +faulthandler_timeout = 1500 +xfail_strict = true +filterwarnings = [ + 
"error", + # Internal warnings raised during tests. + "module:use an explicit session with no_cursor_timeout=True:UserWarning", + "module:serverselectiontimeoutms must be:UserWarning", + "module:Unsupported compressor:UserWarning", + "module:zlibcompressionlevel must be:UserWarning", + "module:Wire protocol compression with:UserWarning", + "module:GridIn property:DeprecationWarning", + "module:GridOut property:DeprecationWarning", + # TODO: Remove as part of PYTHON-3923. + "module:unclosed =1.16.0,<3.0.0 diff --git a/requirements/aws.txt b/requirements/aws.txt new file mode 100644 index 0000000000..06e30c11c3 --- /dev/null +++ b/requirements/aws.txt @@ -0,0 +1 @@ +pymongo-auth-aws>=1.1.0,<2.0.0 diff --git a/requirements/docs.txt b/requirements/docs.txt new file mode 100644 index 0000000000..ce8682c079 --- /dev/null +++ b/requirements/docs.txt @@ -0,0 +1,5 @@ +sphinx>=5.3,<8 +sphinx_rtd_theme>=2,<3 +readthedocs-sphinx-search~=0.3 +sphinxcontrib-shellcheck>=1,<2 +furo==2023.9.10 diff --git a/requirements/encryption.txt b/requirements/encryption.txt new file mode 100644 index 0000000000..bae6115eb5 --- /dev/null +++ b/requirements/encryption.txt @@ -0,0 +1,3 @@ +pymongo-auth-aws>=1.1.0,<2.0.0 +pymongocrypt>=1.6.0,<2.0.0 +certifi;os.name=='nt' or sys_platform=='darwin' diff --git a/requirements/gssapi.txt b/requirements/gssapi.txt new file mode 100644 index 0000000000..7f156b9cea --- /dev/null +++ b/requirements/gssapi.txt @@ -0,0 +1,2 @@ +pykerberos;os.name!='nt' +winkerberos>=0.5.0;os.name=='nt' diff --git a/requirements/ocsp.txt b/requirements/ocsp.txt new file mode 100644 index 0000000000..6570b0905a --- /dev/null +++ b/requirements/ocsp.txt @@ -0,0 +1,12 @@ +# PyOpenSSL 17.0.0 introduced support for OCSP. 17.1.0 introduced +# a related feature we need. 17.2.0 fixes a bug +# in set_default_verify_paths we should really avoid. +# service_identity 18.1.0 introduced support for IP addr matching. 
+# Fallback to certifi on Windows if we can't load CA certs from the system +# store and just use certifi on macOS. +# https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths +certifi;os.name=='nt' or sys_platform=='darwin' +pyopenssl>=17.2.0 +requests<3.0.0 +cryptography>=2.5 +service_identity>=18.1.0 diff --git a/requirements/snappy.txt b/requirements/snappy.txt new file mode 100644 index 0000000000..9bb71204b8 --- /dev/null +++ b/requirements/snappy.txt @@ -0,0 +1 @@ +python-snappy diff --git a/requirements/test.txt b/requirements/test.txt new file mode 100644 index 0000000000..91e898f3cb --- /dev/null +++ b/requirements/test.txt @@ -0,0 +1 @@ +pytest>=7 diff --git a/requirements/zstd.txt b/requirements/zstd.txt new file mode 100644 index 0000000000..864700d2b3 --- /dev/null +++ b/requirements/zstd.txt @@ -0,0 +1 @@ +zstandard diff --git a/sbom.json b/sbom.json new file mode 100644 index 0000000000..95b362f836 --- /dev/null +++ b/sbom.json @@ -0,0 +1,11 @@ +{ + "metadata": { + "timestamp": "2024-06-10T18:55:17.710940+00:00", + }, + "components": [], + "serialNumber": "urn:uuid:a6c08d96-55e1-4cdb-945c-0e21ced83e34", + "version": 1, + "$schema": "http://cyclonedx.org/schema/bom-1.5.schema.json", + "bomFormat": "CycloneDX", + "specVersion": "1.5" +} diff --git a/setup.py b/setup.py index a711e246bf..f371b3d75b 100644 --- a/setup.py +++ b/setup.py @@ -1,139 +1,8 @@ from __future__ import annotations -import os -import sys -import warnings +msg = ( + "PyMongo>=4.8 no longer supports building via setup.py, use python -m pip install instead. 
If " + "this is an editable install (-e) please upgrade to pip>=21.3 first: python -m pip install --upgrade pip" +) -# Hack to silence atexit traceback in some Python versions -try: - import multiprocessing # noqa: F401 -except ImportError: - pass - -from setuptools import setup -from setuptools.command.build_ext import build_ext -from setuptools.extension import Extension - - -class custom_build_ext(build_ext): - """Allow C extension building to fail. - - The C extension speeds up BSON encoding, but is not essential. - """ - - warning_message = """ -******************************************************************** -WARNING: %s could not -be compiled. No C extensions are essential for PyMongo to run, -although they do result in significant speed improvements. -%s - -Please see the installation docs for solutions to build issues: - -https://pymongo.readthedocs.io/en/stable/installation.html - -Here are some hints for popular operating systems: - -If you are seeing this message on Linux you probably need to -install GCC and/or the Python development package for your -version of Python. - -Debian and Ubuntu users should issue the following command: - - $ sudo apt-get install build-essential python-dev - -Users of Red Hat based distributions (RHEL, CentOS, Amazon Linux, -Oracle Linux, Fedora, etc.) should issue the following command: - - $ sudo yum install gcc python-devel - -If you are seeing this message on Microsoft Windows please install -PyMongo using pip. Modern versions of pip will install PyMongo -from binary wheels available on pypi. If you must install from -source read the documentation here: - -https://pymongo.readthedocs.io/en/stable/installation.html#installing-from-source-on-windows - -If you are seeing this message on macOS / OSX please install PyMongo -using pip. Modern versions of pip will install PyMongo from binary -wheels available on pypi. 
If wheels are not available for your version -of macOS / OSX, or you must install from source read the documentation -here: - -https://pymongo.readthedocs.io/en/stable/installation.html#osx -******************************************************************** -""" - - def run(self): - try: - build_ext.run(self) - except Exception: - if os.environ.get("PYMONGO_C_EXT_MUST_BUILD"): - raise - e = sys.exc_info()[1] - sys.stdout.write("%s\n" % str(e)) - warnings.warn( - self.warning_message - % ( - "Extension modules", - "There was an issue with your platform configuration - see above.", - ), - stacklevel=2, - ) - - def build_extension(self, ext): - name = ext.name - try: - build_ext.build_extension(self, ext) - except Exception: - if os.environ.get("PYMONGO_C_EXT_MUST_BUILD"): - raise - e = sys.exc_info()[1] - sys.stdout.write("%s\n" % str(e)) - warnings.warn( - self.warning_message - % ( - "The %s extension module" % (name,), # noqa: UP031 - "The output above this warning shows how the compilation failed.", - ), - stacklevel=2, - ) - - -ext_modules = [ - Extension( - "bson._cbson", - include_dirs=["bson"], - sources=["bson/_cbsonmodule.c", "bson/time64.c", "bson/buffer.c"], - ), - Extension( - "pymongo._cmessage", - include_dirs=["bson"], - sources=[ - "pymongo/_cmessagemodule.c", - "bson/_cbsonmodule.c", - "bson/time64.c", - "bson/buffer.c", - ], - ), -] - - -if "--no_ext" in sys.argv or os.environ.get("NO_EXT"): - try: - sys.argv.remove("--no_ext") - except ValueError: - pass - ext_modules = [] -elif sys.platform.startswith("java") or sys.platform == "cli" or "PyPy" in sys.version: - sys.stdout.write( - """ -*****************************************************\n -The optional C extensions are currently not supported\n -by this python implementation.\n -*****************************************************\n -""" - ) - ext_modules = [] - -setup(cmdclass={"build_ext": custom_build_ext}, ext_modules=ext_modules) # type:ignore +raise RuntimeError(msg) diff --git 
a/test/__init__.py b/test/__init__.py index cea27c01f7..c516838f47 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -51,7 +51,7 @@ from pymongo.hello import HelloCompat from pymongo.mongo_client import MongoClient from pymongo.server_api import ServerApi -from pymongo.ssl_support import HAVE_SSL, _ssl +from pymongo.ssl_support import HAVE_SSL, _ssl # type:ignore[attr-defined] from pymongo.uri_parser import parse_uri if HAVE_SSL: @@ -67,6 +67,7 @@ # for a replica set. host = os.environ.get("DB_IP", "localhost") port = int(os.environ.get("DB_PORT", 27017)) +IS_SRV = "mongodb+srv" in host db_user = os.environ.get("DB_USER", "user") db_pwd = os.environ.get("DB_PASSWORD", "password") @@ -113,6 +114,10 @@ "accessKeyId": os.environ.get("FLE_AWS_KEY", ""), "secretAccessKey": os.environ.get("FLE_AWS_SECRET", ""), } +AWS_CREDS_2 = { + "accessKeyId": os.environ.get("FLE_AWS_KEY2", ""), + "secretAccessKey": os.environ.get("FLE_AWS_SECRET2", ""), +} AZURE_CREDS = { "tenantId": os.environ.get("FLE_AZURE_TENANTID", ""), "clientId": os.environ.get("FLE_AZURE_CLIENTID", ""), @@ -124,6 +129,9 @@ } KMIP_CREDS = {"endpoint": os.environ.get("FLE_KMIP_ENDPOINT", "localhost:5698")} +# Ensure Evergreen metadata doesn't result in truncation +os.environ.setdefault("MONGOB_LOG_MAX_DOCUMENT_LENGTH", "2000") + def is_server_resolvable(): """Returns True if 'server' is resolvable.""" @@ -269,6 +277,7 @@ def __init__(self): self.is_data_lake = False self.load_balancer = TEST_LOADBALANCER self.serverless = TEST_SERVERLESS + self._fips_enabled = None if self.load_balancer or self.serverless: self.default_client_options["loadBalanced"] = True if COMPRESSORS: @@ -281,6 +290,8 @@ def __init__(self): def client_options(self): """Return the MongoClient options for creating a duplicate client.""" opts = client_context.default_client_options.copy() + opts["host"] = host + opts["port"] = port if client_context.auth_enabled: opts["username"] = db_user opts["password"] = db_pwd @@ -342,8 +353,7 @@ 
def _init_client(self): if self.client is not None: # Return early when connected to dataLake as mongohoused does not # support the getCmdLineOpts command and is tested without TLS. - build_info: Any = self.client.admin.command("buildInfo") - if "dataLake" in build_info: + if os.environ.get("TEST_DATA_LAKE"): self.is_data_lake = True self.auth_enabled = True self.client = self._connect(host, port, username=db_user, password=db_pwd) @@ -378,7 +388,7 @@ def _init_client(self): self.auth_enabled = self._server_started_with_auth() if self.auth_enabled: - if not self.serverless: + if not self.serverless and not IS_SRV: # See if db_user already exists. if not self._check_user_provided(): _create_user(self.client.admin, db_user, db_pwd) @@ -446,7 +456,7 @@ def _init_client(self): else: self.server_parameters = self.client.admin.command("getParameter", "*") assert self.cmd_line is not None - if "enableTestCommands=1" in self.cmd_line["argv"]: + if self.server_parameters["enableTestCommands"]: self.test_commands_enabled = True elif "parsed" in self.cmd_line: params = self.cmd_line["parsed"].get("setParameter", []) @@ -482,14 +492,14 @@ def connection_attempt_info(self): @property def host(self): - if self.is_rs: + if self.is_rs and not IS_SRV: primary = self.client.primary return str(primary[0]) if primary is not None else host return host @property def port(self): - if self.is_rs: + if self.is_rs and not IS_SRV: primary = self.client.primary return primary[1] if primary is not None else port return port @@ -514,6 +524,21 @@ def storage_engine(self): # Raised if self.server_status is None. 
return None + @property + def fips_enabled(self): + if self._fips_enabled is not None: + return self._fips_enabled + try: + subprocess.check_call(["fips-mode-setup", "--is-enabled"]) + self._fips_enabled = True + except (subprocess.SubprocessError, FileNotFoundError): + self._fips_enabled = False + return self._fips_enabled + + def check_auth_type(self, auth_type): + auth_mechs = self.server_parameters.get("authenticationMechanisms", []) + return auth_type in auth_mechs + def _check_user_provided(self): """Return True if db_user/db_password is already an admin user.""" client: MongoClient = pymongo.MongoClient( @@ -657,6 +682,12 @@ def require_auth(self, func): lambda: self.auth_enabled, "Authentication is not enabled on the server", func=func ) + def require_no_fips(self, func): + """Run a test only if the host does not have FIPS enabled.""" + return self._require( + lambda: not self.fips_enabled, "Test cannot run on a FIPS-enabled host", func=func + ) + def require_no_auth(self, func): """Run a test only if the server is running without auth enabled.""" return self._require( diff --git a/test/auth/legacy/connection-string.json b/test/auth/legacy/connection-string.json index 0463a5141e..57fd9d4a11 100644 --- a/test/auth/legacy/connection-string.json +++ b/test/auth/legacy/connection-string.json @@ -446,9 +446,8 @@ } }, { - "description": "should recognise the mechanism and request callback (MONGODB-OIDC)", - "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC", - "callback": ["oidcRequest"], + "description": "should recognise the mechanism with test environment (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:test", "valid": true, "credential": { "username": null, @@ -456,14 +455,13 @@ "source": "$external", "mechanism": "MONGODB-OIDC", "mechanism_properties": { - "REQUEST_TOKEN_CALLBACK": true + "ENVIRONMENT": "test" } } }, { - "description": "should recognise the mechanism when auth source is 
explicitly specified and with request callback (MONGODB-OIDC)", - "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authSource=$external", - "callback": ["oidcRequest"], + "description": "should recognise the mechanism when auth source is explicitly specified and with environment (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authSource=$external&authMechanismProperties=ENVIRONMENT:test", "valid": true, "credential": { "username": null, @@ -471,83 +469,162 @@ "source": "$external", "mechanism": "MONGODB-OIDC", "mechanism_properties": { - "REQUEST_TOKEN_CALLBACK": true + "ENVIRONMENT": "test" } } }, { - "description": "should recognise the mechanism and username with request callback (MONGODB-OIDC)", - "uri": "mongodb://principalName@localhost/?authMechanism=MONGODB-OIDC", - "callback": ["oidcRequest"], + "description": "should throw an exception if username and password is specified for test environment (MONGODB-OIDC)", + "uri": "mongodb://user:pass@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:test", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception if username is specified for test environment (MONGODB-OIDC)", + "uri": "mongodb://principalName@localhost/?authMechanism=MONGODB-OIDC&ENVIRONMENT:test", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception if specified environment is not supported (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:invalid", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception if neither provider nor callbacks specified (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC", + "valid": false, + "credential": null + }, + { + "description": "should recognise the mechanism with azure provider (MONGODB-OIDC)", + "uri": 
"mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:foo", "valid": true, "credential": { - "username": "principalName", + "username": null, "password": null, "source": "$external", "mechanism": "MONGODB-OIDC", "mechanism_properties": { - "REQUEST_TOKEN_CALLBACK": true + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": "foo" } } }, { - "description": "should recognise the mechanism with aws device (MONGODB-OIDC)", - "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=PROVIDER_NAME:aws", + "description": "should accept a username with azure provider (MONGODB-OIDC)", + "uri": "mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:foo", "valid": true, "credential": { - "username": null, + "username": "user", "password": null, "source": "$external", "mechanism": "MONGODB-OIDC", "mechanism_properties": { - "PROVIDER_NAME": "aws" + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": "foo" } } }, { - "description": "should recognise the mechanism when auth source is explicitly specified and with aws device (MONGODB-OIDC)", - "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authSource=$external&authMechanismProperties=PROVIDER_NAME:aws", + "description": "should accept a url-encoded TOKEN_RESOURCE (MONGODB-OIDC)", + "uri": "mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:mongodb%3A%2F%2Ftest-cluster", "valid": true, "credential": { - "username": null, + "username": "user", "password": null, "source": "$external", "mechanism": "MONGODB-OIDC", "mechanism_properties": { - "PROVIDER_NAME": "aws" + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": "mongodb://test-cluster" } } }, { - "description": "should throw an exception if username and password are specified (MONGODB-OIDC)", - "uri": "mongodb://user:pass@localhost/?authMechanism=MONGODB-OIDC", - "callback": ["oidcRequest"], - "valid": 
false, - "credential": null + "description": "should accept an un-encoded TOKEN_RESOURCE (MONGODB-OIDC)", + "uri": "mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:mongodb://test-cluster", + "valid": true, + "credential": { + "username": "user", + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": "mongodb://test-cluster" + } + } }, { - "description": "should throw an exception if username and deviceName are specified (MONGODB-OIDC)", - "uri": "mongodb://principalName@localhost/?authMechanism=MONGODB-OIDC&PROVIDER_NAME:gcp", + "description": "should handle a complicated url-encoded TOKEN_RESOURCE (MONGODB-OIDC)", + "uri": "mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:abcd%25ef%3Ag%26hi", + "valid": true, + "credential": { + "username": "user", + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": "abcd%ef:g&hi" + } + } + }, + { + "description": "should url-encode a TOKEN_RESOURCE (MONGODB-OIDC)", + "uri": "mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:a$b", + "valid": true, + "credential": { + "username": "user", + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": "a$b" + } + } + }, + { + "description": "should accept a username and throw an error for a password with azure provider (MONGODB-OIDC)", + "uri": "mongodb://user:pass@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:foo", "valid": false, "credential": null }, { - "description": "should throw an exception if specified deviceName is not supported (MONGODB-OIDC)", - "uri": 
"mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=PROVIDER_NAME:unexisted", + "description": "should throw an exception if no token audience is given for azure provider (MONGODB-OIDC)", + "uri": "mongodb://username@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure", "valid": false, "credential": null }, { - "description": "should throw an exception if neither deviceName nor callback specified (MONGODB-OIDC)", - "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC", + "description": "should recognise the mechanism with gcp provider (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:gcp,TOKEN_RESOURCE:foo", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "gcp", + "TOKEN_RESOURCE": "foo" + } + } + }, + { + "description": "should throw an error for a username and password with gcp provider (MONGODB-OIDC)", + "uri": "mongodb://user:pass@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:gcp,TOKEN_RESOURCE:foo", "valid": false, "credential": null }, { - "description": "should throw an exception when unsupported auth property is specified (MONGODB-OIDC)", - "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=UnsupportedProperty:unexisted", + "description": "should throw an error if not TOKEN_RESOURCE with gcp provider (MONGODB-OIDC)", + "uri": "mongodb://user:pass@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:gcp", "valid": false, "credential": null } ] -} +} \ No newline at end of file diff --git a/test/auth/unified/mongodb-oidc-no-retry.json b/test/auth/unified/mongodb-oidc-no-retry.json new file mode 100644 index 0000000000..0a8658455e --- /dev/null +++ b/test/auth/unified/mongodb-oidc-no-retry.json @@ -0,0 +1,422 @@ +{ + "description": "MONGODB-OIDC 
authentication with retry disabled", + "schemaVersion": "1.19", + "runOnRequirements": [ + { + "minServerVersion": "7.0", + "auth": true, + "authMechanism": "MONGODB-OIDC", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client0", + "uriOptions": { + "authMechanism": "MONGODB-OIDC", + "authMechanismProperties": { + "$$placeholder": 1 + }, + "retryReads": false, + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collName" + } + } + ], + "initialData": [ + { + "collectionName": "collName", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "A read operation should succeed", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {} + }, + "expectResult": [] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collName", + "filter": {} + } + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "A write operation should succeed", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Read commands should reauthenticate and retry when a ReauthenticationRequired error happens", + "operations": [ + 
{ + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 391 + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {} + }, + "expectResult": [] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collName", + "filter": {} + } + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collName", + "filter": {} + } + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Write commands should reauthenticate and retry when a ReauthenticationRequired error happens", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 391 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Handshake with cached token should use speculative authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + 
"failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslStart" + ], + "errorCode": 18 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Handshake without cached token should not use speculative authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslStart" + ], + "errorCode": 18 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 1 + } + }, + "expectError": { + "errorCode": 18 + } + } + ] + } + ] +} diff --git a/test/auth/unified/reauthenticate_with_retry.json b/test/auth/unified/reauthenticate_with_retry.json deleted file mode 100644 index ef110562ed..0000000000 --- 
a/test/auth/unified/reauthenticate_with_retry.json +++ /dev/null @@ -1,191 +0,0 @@ -{ - "description": "reauthenticate_with_retry", - "schemaVersion": "1.12", - "runOnRequirements": [ - { - "minServerVersion": "6.3", - "auth": true - } - ], - "createEntities": [ - { - "client": { - "id": "client0", - "uriOptions": { - "retryReads": true, - "retryWrites": true - }, - "observeEvents": [ - "commandStartedEvent", - "commandSucceededEvent", - "commandFailedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "db" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "collName" - } - } - ], - "initialData": [ - { - "collectionName": "collName", - "databaseName": "db", - "documents": [] - } - ], - "tests": [ - { - "description": "Read command should reauthenticate when receive ReauthenticationRequired error code and retryReads=true", - "operations": [ - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "client0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 391 - } - } - } - }, - { - "name": "find", - "arguments": { - "filter": {} - }, - "object": "collection0", - "expectResult": [] - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "find": "collName", - "filter": {} - } - } - }, - { - "commandFailedEvent": { - "commandName": "find" - } - }, - { - "commandStartedEvent": { - "command": { - "find": "collName", - "filter": {} - } - } - }, - { - "commandSucceededEvent": { - "commandName": "find" - } - } - ] - } - ] - }, - { - "description": "Write command should reauthenticate when receive ReauthenticationRequired error code and retryWrites=true", - "operations": [ - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "client0", - "failPoint": { - 
"configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 391 - } - } - } - }, - { - "name": "insertOne", - "object": "collection0", - "arguments": { - "document": { - "_id": 1, - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "insert": "collName", - "documents": [ - { - "_id": 1, - "x": 1 - } - ] - } - } - }, - { - "commandFailedEvent": { - "commandName": "insert" - } - }, - { - "commandStartedEvent": { - "command": { - "insert": "collName", - "documents": [ - { - "_id": 1, - "x": 1 - } - ] - } - } - }, - { - "commandSucceededEvent": { - "commandName": "insert" - } - } - ] - } - ] - } - ] -} diff --git a/test/auth/unified/reauthenticate_without_retry.json b/test/auth/unified/reauthenticate_without_retry.json deleted file mode 100644 index 6fded47634..0000000000 --- a/test/auth/unified/reauthenticate_without_retry.json +++ /dev/null @@ -1,191 +0,0 @@ -{ - "description": "reauthenticate_without_retry", - "schemaVersion": "1.12", - "runOnRequirements": [ - { - "minServerVersion": "6.3", - "auth": true - } - ], - "createEntities": [ - { - "client": { - "id": "client0", - "uriOptions": { - "retryReads": false, - "retryWrites": false - }, - "observeEvents": [ - "commandStartedEvent", - "commandSucceededEvent", - "commandFailedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "db" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "collName" - } - } - ], - "initialData": [ - { - "collectionName": "collName", - "databaseName": "db", - "documents": [] - } - ], - "tests": [ - { - "description": "Read command should reauthenticate when receive ReauthenticationRequired error code and retryReads=false", - "operations": [ - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "client0", - 
"failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 391 - } - } - } - }, - { - "name": "find", - "arguments": { - "filter": {} - }, - "object": "collection0", - "expectResult": [] - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "find": "collName", - "filter": {} - } - } - }, - { - "commandFailedEvent": { - "commandName": "find" - } - }, - { - "commandStartedEvent": { - "command": { - "find": "collName", - "filter": {} - } - } - }, - { - "commandSucceededEvent": { - "commandName": "find" - } - } - ] - } - ] - }, - { - "description": "Write command should reauthenticate when receive ReauthenticationRequired error code and retryWrites=false", - "operations": [ - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "client0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 391 - } - } - } - }, - { - "name": "insertOne", - "object": "collection0", - "arguments": { - "document": { - "_id": 1, - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "insert": "collName", - "documents": [ - { - "_id": 1, - "x": 1 - } - ] - } - } - }, - { - "commandFailedEvent": { - "commandName": "insert" - } - }, - { - "commandStartedEvent": { - "command": { - "insert": "collName", - "documents": [ - { - "_id": 1, - "x": 1 - } - ] - } - } - }, - { - "commandSucceededEvent": { - "commandName": "insert" - } - } - ] - } - ] - } - ] -} diff --git a/test/auth_aws/test_auth_aws.py b/test/auth_aws/test_auth_aws.py index d0bb41b739..3e5dcec563 100644 --- a/test/auth_aws/test_auth_aws.py +++ b/test/auth_aws/test_auth_aws.py @@ -60,8 +60,13 @@ def test_connect_uri(self): def setup_cache(self): if os.environ.get("AWS_ACCESS_KEY_ID", 
None) or "@" in self.uri: self.skipTest("Not testing cached credentials") - if not hasattr(auth, "set_cached_credentials"): - self.skipTest("Cached credentials not available") + + # Make a connection to ensure that we enable caching. + client = MongoClient(self.uri) + client.get_database().test.find_one() + client.close() + + self.assertTrue(auth.get_use_cached_credentials()) # Ensure cleared credentials. auth.set_cached_credentials(None) diff --git a/test/auth_oidc/test_auth_oidc.py b/test/auth_oidc/test_auth_oidc.py index 29de512da7..c7614fa0c3 100644 --- a/test/auth_oidc/test_auth_oidc.py +++ b/test/auth_oidc/test_auth_oidc.py @@ -17,61 +17,69 @@ import os import sys +import threading import time import unittest +import warnings from contextlib import contextmanager +from pathlib import Path from typing import Dict sys.path[0:0] = [""] +import pprint +from test.unified_format import generate_test_classes from test.utils import EventListener from bson import SON from pymongo import MongoClient -from pymongo.auth import _AUTH_MAP, _authenticate_oidc +from pymongo._azure_helpers import _get_azure_response +from pymongo._gcp_helpers import _get_gcp_response +from pymongo.auth_oidc import OIDCCallback, OIDCCallbackContext, OIDCCallbackResult from pymongo.cursor import CursorType -from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.errors import AutoReconnect, ConfigurationError, OperationFailure from pymongo.hello import HelloCompat from pymongo.operations import InsertOne +from pymongo.uri_parser import parse_uri -# Force MONGODB-OIDC to be enabled. -_AUTH_MAP["MONGODB-OIDC"] = _authenticate_oidc # type:ignore +ROOT = Path(__file__).parent.parent.resolve() +TEST_PATH = ROOT / "auth" / "unified" +ENVIRON = os.environ.get("OIDC_ENV", "test") +DOMAIN = os.environ.get("OIDC_DOMAIN", "") +TOKEN_DIR = os.environ.get("OIDC_TOKEN_DIR", "") +TOKEN_FILE = os.environ.get("OIDC_TOKEN_FILE", "") +# Generate unified tests. 
+globals().update(generate_test_classes(str(TEST_PATH), module=__name__)) -class TestAuthOIDC(unittest.TestCase): - uri: str +class OIDCTestBase(unittest.TestCase): @classmethod def setUpClass(cls): cls.uri_single = os.environ["MONGODB_URI_SINGLE"] - cls.uri_multiple = os.environ["MONGODB_URI_MULTI"] + cls.uri_multiple = os.environ.get("MONGODB_URI_MULTI") cls.uri_admin = os.environ["MONGODB_URI"] - cls.token_dir = os.environ["OIDC_TOKEN_DIR"] def setUp(self): self.request_called = 0 - def create_request_cb(self, username="test_user1", sleep=0): - - token_file = os.path.join(self.token_dir, username).replace(os.sep, "/") - - def request_token(server_info, context): - # Validate the info. - self.assertIn("issuer", server_info) - self.assertIn("clientId", server_info) - - # Validate the timeout. - timeout_seconds = context["timeout_seconds"] - self.assertEqual(timeout_seconds, 60 * 5) + def get_token(self, username=None): + """Get a token for the current provider.""" + if ENVIRON == "test": + if username is None: + token_file = TOKEN_FILE + else: + token_file = os.path.join(TOKEN_DIR, username) with open(token_file) as fid: - token = fid.read() - resp = {"access_token": token, "refresh_token": token} - - time.sleep(sleep) - self.request_called += 1 - return resp - - return request_token + return fid.read() + elif ENVIRON == "azure": + opts = parse_uri(self.uri_single)["options"] + token_aud = opts["authmechanismproperties"]["TOKEN_RESOURCE"] + return _get_azure_response(token_aud, username)["access_token"] + elif ENVIRON == "gcp": + opts = parse_uri(self.uri_single)["options"] + token_aud = opts["authmechanismproperties"]["TOKEN_RESOURCE"] + return _get_gcp_response(token_aud, username)["access_token"] @contextmanager def fail_point(self, command_args): @@ -84,156 +92,281 @@ def fail_point(self, command_args): finally: client.admin.command("configureFailPoint", cmd_on["configureFailPoint"], mode="off") - def 
test_connect_request_callback_single_implicit_username(self): - request_token = self.create_request_cb() - props: Dict = {"request_token_callback": request_token} - client = MongoClient(self.uri_single, authmechanismproperties=props) + +class TestAuthOIDCHuman(OIDCTestBase): + uri: str + + @classmethod + def setUpClass(cls): + if ENVIRON != "test": + raise unittest.SkipTest("Human workflows are only tested with the test environment") + if DOMAIN is None: + raise ValueError("Missing OIDC_DOMAIN") + super().setUpClass() + + def setUp(self): + self.refresh_present = 0 + super().setUp() + + def create_request_cb(self, username="test_user1", sleep=0): + def request_token(context: OIDCCallbackContext): + # Validate the info. + self.assertIsInstance(context.idp_info.issuer, str) + if context.idp_info.clientId is not None: + self.assertIsInstance(context.idp_info.clientId, str) + + # Validate the timeout. + timeout_seconds = context.timeout_seconds + self.assertEqual(timeout_seconds, 60 * 5) + + if context.refresh_token: + self.refresh_present += 1 + + token = self.get_token(username) + resp = OIDCCallbackResult(access_token=token, refresh_token=token) + + time.sleep(sleep) + self.request_called += 1 + return resp + + class Inner(OIDCCallback): + def fetch(self, context): + return request_token(context) + + return Inner() + + def create_client(self, *args, **kwargs): + username = kwargs.get("username", "test_user1") + if kwargs.get("username") in ["test_user1", "test_user2"]: + kwargs["username"] = f"{username}@{DOMAIN}" + request_cb = kwargs.pop("request_cb", self.create_request_cb(username=username)) + props = kwargs.pop("authmechanismproperties", {"OIDC_HUMAN_CALLBACK": request_cb}) + kwargs["retryReads"] = False + if not len(args): + args = [self.uri_single] + + return MongoClient(*args, authmechanismproperties=props, **kwargs) + + def test_1_1_single_principal_implicit_username(self): + # Create default OIDC client with authMechanism=MONGODB-OIDC. 
+ client = self.create_client() + # Perform a find operation that succeeds. client.test.test.find_one() + # Close the client. client.close() - def test_connect_request_callback_single_explicit_username(self): - request_token = self.create_request_cb() - props: Dict = {"request_token_callback": request_token} - client = MongoClient(self.uri_single, username="test_user1", authmechanismproperties=props) + def test_1_2_single_principal_explicit_username(self): + # Create a client with MONGODB_URI_SINGLE, a username of test_user1, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = self.create_client(username="test_user1") + # Perform a find operation that succeeds. client.test.test.find_one() + # Close the client.. client.close() - def test_connect_request_callback_multiple_principal_user1(self): - request_token = self.create_request_cb() - props: Dict = {"request_token_callback": request_token} - client = MongoClient( - self.uri_multiple, username="test_user1", authmechanismproperties=props - ) + def test_1_3_multiple_principal_user_1(self): + if not self.uri_multiple: + raise unittest.SkipTest("Test Requires Server with Multiple Workflow IdPs") + # Create a client with MONGODB_URI_MULTI, a username of test_user1, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = self.create_client(self.uri_multiple, username="test_user1") + # Perform a find operation that succeeds. client.test.test.find_one() + # Close the client. 
client.close() - def test_connect_request_callback_multiple_principal_user2(self): - request_token = self.create_request_cb("test_user2") - props: Dict = {"request_token_callback": request_token} - client = MongoClient( - self.uri_multiple, username="test_user2", authmechanismproperties=props - ) + def test_1_4_multiple_principal_user_2(self): + if not self.uri_multiple: + raise unittest.SkipTest("Test Requires Server with Multiple Workflow IdPs") + # Create a human callback that reads in the generated test_user2 token file. + # Create a client with MONGODB_URI_MULTI, a username of test_user2, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = self.create_client(self.uri_multiple, username="test_user2") + # Perform a find operation that succeeds. client.test.test.find_one() + # Close the client. client.close() - def test_connect_request_callback_multiple_no_username(self): - request_token = self.create_request_cb() - props: Dict = {"request_token_callback": request_token} - client = MongoClient(self.uri_multiple, authmechanismproperties=props) + def test_1_5_multiple_principal_no_user(self): + if not self.uri_multiple: + raise unittest.SkipTest("Test Requires Server with Multiple Workflow IdPs") + # Create a client with MONGODB_URI_MULTI, no username, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = self.create_client(self.uri_multiple) + # Assert that a find operation fails. with self.assertRaises(OperationFailure): client.test.test.find_one() + # Close the client. client.close() - def test_allowed_hosts_blocked(self): + def test_1_6_allowed_hosts_blocked(self): + # Create a default OIDC client, with an ALLOWED_HOSTS that is an empty list. 
request_token = self.create_request_cb() - props: Dict = {"request_token_callback": request_token, "allowed_hosts": []} - client = MongoClient(self.uri_single, authmechanismproperties=props) + props: Dict = {"OIDC_HUMAN_CALLBACK": request_token, "ALLOWED_HOSTS": []} + client = self.create_client(authmechanismproperties=props) + # Assert that a find operation fails with a client-side error. with self.assertRaises(ConfigurationError): client.test.test.find_one() + # Close the client. client.close() - props: Dict = {"request_token_callback": request_token, "allowed_hosts": ["example.com"]} - client = MongoClient( - self.uri_single + "&ignored=example.com", authmechanismproperties=props, connect=False - ) + # Create a client that uses the URL mongodb://localhost/?authMechanism=MONGODB-OIDC&ignored=example.com, + # a human callback, and an ALLOWED_HOSTS that contains ["example.com"]. + props: Dict = { + "OIDC_HUMAN_CALLBACK": request_token, + "ALLOWED_HOSTS": ["example.com"], + } + with warnings.catch_warnings(): + warnings.simplefilter("default") + client = self.create_client( + self.uri_single + "&ignored=example.com", + authmechanismproperties=props, + connect=False, + ) + # Assert that a find operation fails with a client-side error. with self.assertRaises(ConfigurationError): client.test.test.find_one() + # Close the client. client.close() - def test_valid_request_token_callback(self): - request_cb = self.create_request_cb() - - props: Dict = { - "request_token_callback": request_cb, - } - client = MongoClient(self.uri_single, authmechanismproperties=props) + def test_1_7_allowed_hosts_in_connection_string_ignored(self): + # Create an OIDC configured client with the connection string: `mongodb+srv://example.com/?authMechanism=MONGODB-OIDC&authMechanismProperties=ALLOWED_HOSTS:%5B%22example.com%22%5D` and a Human Callback. + # Assert that the creation of the client raises a configuration error. 
+ uri = "mongodb+srv://example.com?authMechanism=MONGODB-OIDC&authMechanismProperties=ALLOWED_HOSTS:%5B%22example.com%22%5D" + with self.assertRaises(ConfigurationError), warnings.catch_warnings(): + warnings.simplefilter("ignore") + _ = MongoClient( + uri, authmechanismproperties=dict(OIDC_HUMAN_CALLBACK=self.create_request_cb()) + ) + + def test_1_8_machine_idp_human_callback(self): + if not os.environ.get("OIDC_IS_LOCAL"): + raise unittest.SkipTest("Test Requires Local OIDC server") + # Create a client with MONGODB_URI_SINGLE, a username of test_machine, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = self.create_client(username="test_machine") + # Perform a find operation that succeeds. client.test.test.find_one() + # Close the client. client.close() - client = MongoClient(self.uri_single, authmechanismproperties=props) + def test_2_1_valid_callback_inputs(self): + # Create a MongoClient with a human callback that validates its inputs and returns a valid access token. + client = self.create_client() + # Perform a find operation that succeeds. Verify that the human callback was called with the appropriate inputs, including the timeout parameter if possible. + # Ensure that there are no unexpected fields. client.test.test.find_one() + # Close the client. client.close() - def test_request_callback_returns_null(self): - def request_token_null(a, b): - return None + def test_2_2_callback_returns_missing_data(self): + # Create a MongoClient with a human callback that returns data not conforming to the OIDCCredential with missing fields. + class CustomCB(OIDCCallback): + def fetch(self, ctx): + return dict() - props: Dict = {"request_token_callback": request_token_null} - client = MongoClient(self.uri_single, authMechanismProperties=props) + client = self.create_client(request_cb=CustomCB()) + # Perform a find operation that fails. with self.assertRaises(ValueError): client.test.test.find_one() + # Close the client. 
client.close() - def test_request_callback_invalid_result(self): - def request_token_invalid(a, b): - return {} + def test_2_3_refresh_token_is_passed_to_the_callback(self): + # Create a MongoClient with a human callback that checks for the presence of a refresh token. + client = self.create_client() - props: Dict = {"request_token_callback": request_token_invalid} - client = MongoClient(self.uri_single, authMechanismProperties=props) - with self.assertRaises(ValueError): + # Perform a find operation that succeeds. + client.test.test.find_one() + + # Set a fail point for ``find`` commands. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a ``find`` operation that succeeds. client.test.test.find_one() - client.close() - def request_cb_extra_value(server_info, context): - result = self.create_request_cb()(server_info, context) - result["foo"] = "bar" - return result + # Assert that the callback has been called twice. + self.assertEqual(self.request_called, 2) - props: Dict = {"request_token_callback": request_cb_extra_value} - client = MongoClient(self.uri_single, authMechanismProperties=props) - with self.assertRaises(ValueError): - client.test.test.find_one() - client.close() + # Assert that the refresh token was used once. + self.assertEqual(self.refresh_present, 1) - def test_speculative_auth_success(self): - request_token = self.create_request_cb() + def test_3_1_uses_speculative_authentication_if_there_is_a_cached_token(self): + # Create a client with a human callback that returns a valid token. + client = self.create_client() - # Create a client with a request callback that returns a valid token. - props: Dict = {"request_token_callback": request_token} - client = MongoClient(self.uri_single, authmechanismproperties=props) + # Set a fail point for ``find`` commands. 
+ with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391, "closeConnection": True}, + } + ): + # Perform a ``find`` operation that fails. + with self.assertRaises(AutoReconnect): + client.test.test.find_one() - # Set a fail point for saslStart commands. + # Set a fail point for ``saslStart`` commands. with self.fail_point( { - "mode": {"times": 2}, + "mode": {"times": 1}, "data": {"failCommands": ["saslStart"], "errorCode": 18}, } ): - # Perform a find operation. + # Perform a ``find`` operation that succeeds client.test.test.find_one() # Close the client. client.close() - def test_reauthenticate_succeeds(self): - listener = EventListener() + def test_3_2_does_not_use_speculative_authentication_if_there_is_no_cached_token(self): + # Create a ``MongoClient`` with a human callback that returns a valid token + client = self.create_client() - # Create request callback that returns valid credentials. - request_cb = self.create_request_cb() + # Set a fail point for ``saslStart`` commands. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["saslStart"], "errorCode": 18}, + } + ): + # Perform a ``find`` operation that fails. + with self.assertRaises(OperationFailure): + client.test.test.find_one() - # Create a client with the callback. - props: Dict = {"request_token_callback": request_cb} - client = MongoClient( - self.uri_single, event_listeners=[listener], authmechanismproperties=props - ) + # Close the client. + client.close() - # Perform a find operation. + def test_4_1_reauthenticate_succeeds(self): + # Create a default OIDC client and add an event listener. + # The following assumes that the driver does not emit saslStart or saslContinue events. + # If the driver does emit those events, ignore/filter them for the purposes of this test. + listener = EventListener() + client = self.create_client(event_listeners=[listener]) + + # Perform a find operation that succeeds. 
client.test.test.find_one() - # Assert that the request callback has been called once. + # Assert that the human callback has been called once. self.assertEqual(self.request_called, 1) + # Clear the listener state if possible. listener.reset() + # Force a reauthenication using a fail point. with self.fail_point( { "mode": {"times": 1}, "data": {"failCommands": ["find"], "errorCode": 391}, } ): - # Perform a find operation. + # Perform another find operation that succeeds. client.test.test.find_one() + # Assert that the human callback has been called twice. + self.assertEqual(self.request_called, 2) + + # Assert that the ordering of list started events is [find, find]. + # Note that if the listener stat could not be cleared then there will be an extra find command. started_events = [ i.command_name for i in listener.started_events if not i.command_name.startswith("sasl") ] @@ -253,89 +386,107 @@ def test_reauthenticate_succeeds(self): "find", ], ) + # Assert that the list of command succeeded events is [find]. self.assertEqual(succeeded_events, ["find"]) + # Assert that a find operation failed once during the command execution. self.assertEqual(failed_events, ["find"]) - - # Assert that the request callback has been called twice. - self.assertEqual(self.request_called, 2) + # Close the client. client.close() - def test_reauthenticate_succeeds_no_refresh(self): + def test_4_2_reauthenticate_succeeds_no_refresh(self): + # Create a default OIDC client with a human callback that does not return a refresh token. cb = self.create_request_cb() - def request_cb(*args, **kwargs): - result = cb(*args, **kwargs) - del result["refresh_token"] - return result + class CustomRequest(OIDCCallback): + def fetch(self, *args, **kwargs): + result = cb.fetch(*args, **kwargs) + result.refresh_token = None + return result - # Create a client with the callback. 
- props: Dict = {"request_token_callback": request_cb} - client = MongoClient(self.uri_single, authmechanismproperties=props) + client = self.create_client(request_cb=CustomRequest()) - # Perform a find operation. + # Perform a find operation that succeeds. client.test.test.find_one() - # Assert that the request callback has been called once. + # Assert that the human callback has been called once. self.assertEqual(self.request_called, 1) + # Force a reauthenication using a fail point. with self.fail_point( { "mode": {"times": 1}, "data": {"failCommands": ["find"], "errorCode": 391}, } ): - # Perform a find operation. + # Perform a find operation that succeeds. client.test.test.find_one() - # Assert that the request callback has been called twice. + # Assert that the human callback has been called twice. self.assertEqual(self.request_called, 2) + # Close the client. client.close() - def test_reauthenticate_succeeds_after_refresh_fails(self): + def test_4_3_reauthenticate_succeeds_after_refresh_fails(self): + # Create a default OIDC client with a human callback that returns an invalid refresh token + cb = self.create_request_cb() - # Create request callback that returns valid credentials. - request_cb = self.create_request_cb() + class CustomRequest(OIDCCallback): + def fetch(self, *args, **kwargs): + result = cb.fetch(*args, **kwargs) + result.refresh_token = "bad" + return result - # Create a client with the callback. - props: Dict = {"request_token_callback": request_cb} - client = MongoClient(self.uri_single, authmechanismproperties=props) + client = self.create_client(request_cb=CustomRequest()) - # Perform a find operation. + # Perform a find operation that succeeds. client.test.test.find_one() - # Assert that the request callback has been called once. + # Assert that the human callback has been called once. self.assertEqual(self.request_called, 1) + # Force a reauthenication using a fail point. 
with self.fail_point( { - "mode": {"times": 2}, - "data": {"failCommands": ["find", "saslContinue"], "errorCode": 391}, + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, } ): - # Perform a find operation. + # Perform a find operation that succeeds. client.test.test.find_one() - # Assert that the request callback has been called three times. - self.assertEqual(self.request_called, 3) + # Assert that the human callback has been called 2 times. + self.assertEqual(self.request_called, 2) - def test_reauthenticate_fails(self): + # Close the client. + client.close() - # Create request callback that returns valid credentials. - request_cb = self.create_request_cb() + def test_4_4_reauthenticate_fails(self): + # Create a default OIDC client with a human callback that returns invalid refresh tokens and + # Returns invalid access tokens after the first access. + cb = self.create_request_cb() - # Create a client with the callback. - props: Dict = {"request_token_callback": request_cb} - client = MongoClient(self.uri_single, authmechanismproperties=props) + class CustomRequest(OIDCCallback): + fetch_called = 0 - # Perform a find operation. - client.test.test.find_one() + def fetch(self, *args, **kwargs): + self.fetch_called += 1 + result = cb.fetch(*args, **kwargs) + result.refresh_token = "bad" + if self.fetch_called > 1: + result.access_token = "bad" + return result - # Assert that the request callback has been called once. + client = self.create_client(request_cb=CustomRequest()) + + # Perform a find operation that succeeds (to force a speculative auth). + client.test.test.find_one() + # Assert that the human callback has been called once. self.assertEqual(self.request_called, 1) + # Force a reauthentication using a failCommand. 
with self.fail_point( { - "mode": {"times": 2}, + "mode": {"times": 1}, "data": {"failCommands": ["find"], "errorCode": 391}, } ): @@ -343,16 +494,82 @@ def test_reauthenticate_fails(self): with self.assertRaises(OperationFailure): client.test.test.find_one() - # Assert that the request callback has been called twice. - self.assertEqual(self.request_called, 2) + # Assert that the human callback has been called three times. + self.assertEqual(self.request_called, 3) + + # Close the client. client.close() - def test_reauthenticate_succeeds_bulk_write(self): + def test_request_callback_returns_null(self): + class RequestTokenNull(OIDCCallback): + def fetch(self, a): + return None + + client = self.create_client(request_cb=RequestTokenNull()) + with self.assertRaises(ValueError): + client.test.test.find_one() + client.close() + + def test_request_callback_invalid_result(self): + class CallbackInvalidToken(OIDCCallback): + def fetch(self, a): + return {} + + client = self.create_client(request_cb=CallbackInvalidToken()) + with self.assertRaises(ValueError): + client.test.test.find_one() + client.close() + + def test_reauthentication_succeeds_multiple_connections(self): request_cb = self.create_request_cb() # Create a client with the callback. - props: Dict = {"request_token_callback": request_cb} - client = MongoClient(self.uri_single, authmechanismproperties=props) + client1 = self.create_client(request_cb=request_cb) + client2 = self.create_client(request_cb=request_cb) + + # Perform an insert operation. + client1.test.test.insert_many([{"a": 1}, {"a": 1}]) + client2.test.test.find_one() + self.assertEqual(self.request_called, 2) + + # Use the same authenticator for both clients + # to simulate a race condition with separate connections. + # We should only see one extra callback despite both connections + # needing to reauthenticate. 
+ client2.options.pool_options._credentials.cache.data = ( + client1.options.pool_options._credentials.cache.data + ) + + client1.test.test.find_one() + client2.test.test.find_one() + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + client1.test.test.find_one() + + self.assertEqual(self.request_called, 3) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + client2.test.test.find_one() + + self.assertEqual(self.request_called, 3) + client1.close() + client2.close() + + # PyMongo specific tests, since we have multiple code paths for reauth handling. + + def test_reauthenticate_succeeds_bulk_write(self): + # Create a client. + client = self.create_client() # Perform a find operation. client.test.test.find_one() @@ -367,24 +584,21 @@ def test_reauthenticate_succeeds_bulk_write(self): } ): # Perform a bulk write operation. - client.test.test.bulk_write([InsertOne({})]) + client.test.test.bulk_write([InsertOne({})]) # type:ignore[type-var] # Assert that the request callback has been called twice. self.assertEqual(self.request_called, 2) client.close() def test_reauthenticate_succeeds_bulk_read(self): - request_cb = self.create_request_cb() - - # Create a client with the callback. - props: Dict = {"request_token_callback": request_cb} - client = MongoClient(self.uri_single, authmechanismproperties=props) + # Create a client. + client = self.create_client() # Perform a find operation. client.test.test.find_one() # Perform a bulk write operation. - client.test.test.bulk_write([InsertOne({})]) + client.test.test.bulk_write([InsertOne({})]) # type:ignore[type-var] # Assert that the request callback has been called once. 
self.assertEqual(self.request_called, 1) @@ -404,11 +618,8 @@ def test_reauthenticate_succeeds_bulk_read(self): client.close() def test_reauthenticate_succeeds_cursor(self): - request_cb = self.create_request_cb() - - # Create a client with the callback. - props: Dict = {"request_token_callback": request_cb} - client = MongoClient(self.uri_single, authmechanismproperties=props) + # Create a client. + client = self.create_client() # Perform an insert operation. client.test.test.insert_one({"a": 1}) @@ -431,11 +642,8 @@ def test_reauthenticate_succeeds_cursor(self): client.close() def test_reauthenticate_succeeds_get_more(self): - request_cb = self.create_request_cb() - - # Create a client with the callback. - props: Dict = {"request_token_callback": request_cb} - client = MongoClient(self.uri_single, authmechanismproperties=props) + # Create a client. + client = self.create_client() # Perform an insert operation. client.test.test.insert_many([{"a": 1}, {"a": 1}]) @@ -459,17 +667,13 @@ def test_reauthenticate_succeeds_get_more(self): def test_reauthenticate_succeeds_get_more_exhaust(self): # Ensure no mongos - props = {"request_token_callback": self.create_request_cb()} - client = MongoClient(self.uri_single, authmechanismproperties=props) + client = self.create_client() hello = client.admin.command(HelloCompat.LEGACY_CMD) if hello.get("msg") != "isdbgrid": raise unittest.SkipTest("Must not be a mongos") - request_cb = self.create_request_cb() - # Create a client with the callback. - props: Dict = {"request_token_callback": request_cb} - client = MongoClient(self.uri_single, authmechanismproperties=props) + client = self.create_client() # Perform an insert operation. client.test.test.insert_many([{"a": 1}, {"a": 1}]) @@ -492,13 +696,8 @@ def test_reauthenticate_succeeds_get_more_exhaust(self): client.close() def test_reauthenticate_succeeds_command(self): - request_cb = self.create_request_cb() - - # Create a client with the callback. 
- props: Dict = {"request_token_callback": request_cb} - - print("start of test") - client = MongoClient(self.uri_single, authmechanismproperties=props) + # Create a client. + client = self.create_client() # Perform an insert operation. client.test.test.insert_one({"a": 1}) @@ -521,14 +720,348 @@ def test_reauthenticate_succeeds_command(self): self.assertEqual(self.request_called, 2) client.close() - def test_reauthentication_succeeds_multiple_connections(self): + +class TestAuthOIDCMachine(OIDCTestBase): + uri: str + + def setUp(self): + self.request_called = 0 + + def create_request_cb(self, username=None, sleep=0): + def request_token(context): + assert isinstance(context.timeout_seconds, int) + assert context.version == 1 + assert context.refresh_token is None + assert context.idp_info is None + token = self.get_token(username) + time.sleep(sleep) + self.request_called += 1 + return OIDCCallbackResult(access_token=token) + + class Inner(OIDCCallback): + def fetch(self, context): + return request_token(context) + + return Inner() + + def create_client(self, *args, **kwargs): + request_cb = kwargs.pop("request_cb", self.create_request_cb()) + props = kwargs.pop("authmechanismproperties", {"OIDC_CALLBACK": request_cb}) + kwargs["retryReads"] = False + if not len(args): + args = [self.uri_single] + return MongoClient(*args, authmechanismproperties=props, **kwargs) + + def test_1_1_callback_is_called_during_reauthentication(self): + # Create a ``MongoClient`` configured with a custom OIDC callback that + # implements the provider logic. + client = self.create_client() + # Perform a ``find`` operation that succeeds. + client.test.test.find_one() + # Assert that the callback was called 1 time. + self.assertEqual(self.request_called, 1) + # Close the client. + client.close() + + def test_1_2_callback_is_called_once_for_multiple_connections(self): + # Create a ``MongoClient`` configured with a custom OIDC callback that + # implements the provider logic. 
+ client = self.create_client() + + # Start 10 threads and run 100 find operations in each thread that all succeed. + def target(): + for _ in range(100): + client.test.test.find_one() + + threads = [] + for _ in range(10): + thread = threading.Thread(target=target) + thread.start() + threads.append(thread) + for thread in threads: + thread.join() + # Assert that the callback was called 1 time. + self.assertEqual(self.request_called, 1) + # Close the client. + client.close() + + def test_2_1_valid_callback_inputs(self): + # Create a MongoClient configured with an OIDC callback that validates its inputs and returns a valid access token. + client = self.create_client() + # Perform a find operation that succeeds. + client.test.test.find_one() + # Assert that the OIDC callback was called with the appropriate inputs, including the timeout parameter if possible. Ensure that there are no unexpected fields. + self.assertEqual(self.request_called, 1) + # Close the client. + client.close() + + def test_2_2_oidc_callback_returns_null(self): + # Create a MongoClient configured with an OIDC callback that returns null. + class CallbackNullToken(OIDCCallback): + def fetch(self, a): + return None + + client = self.create_client(request_cb=CallbackNullToken()) + # Perform a find operation that fails. + with self.assertRaises(ValueError): + client.test.test.find_one() + # Close the client. + client.close() + + def test_2_3_oidc_callback_returns_missing_data(self): + # Create a MongoClient configured with an OIDC callback that returns data not conforming to the OIDCCredential with missing fields. + class CustomCallback(OIDCCallback): + count = 0 + + def fetch(self, a): + self.count += 1 + return object() + + client = self.create_client(request_cb=CustomCallback()) + # Perform a find operation that fails. + with self.assertRaises(ValueError): + client.test.test.find_one() + # Close the client. 
+ client.close() + + def test_2_4_invalid_client_configuration_with_callback(self): + # Create a MongoClient configured with an OIDC callback and auth mechanism property ENVIRONMENT:test. request_cb = self.create_request_cb() + props: Dict = {"OIDC_CALLBACK": request_cb, "ENVIRONMENT": "test"} + # Assert it returns a client configuration error. + with self.assertRaises(ConfigurationError): + self.create_client(authmechanismproperties=props) - # Create a client with the callback. - props: Dict = {"request_token_callback": request_cb} + def test_2_5_invalid_use_of_ALLOWED_HOSTS(self): + # Create an OIDC configured client with auth mechanism properties `{"ENVIRONMENT": "azure", "ALLOWED_HOSTS": []}`. + props: Dict = {"ENVIRONMENT": "azure", "ALLOWED_HOSTS": []} + # Assert it returns a client configuration error. + with self.assertRaises(ConfigurationError): + self.create_client(authmechanismproperties=props) + + def test_3_1_authentication_failure_with_cached_tokens_fetch_a_new_token_and_retry(self): + # Create a MongoClient and an OIDC callback that implements the provider logic. + client = self.create_client() + # Poison the cache with an invalid access token. + # Set a fail point for ``find`` command. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391, "closeConnection": True}, + } + ): + # Perform a ``find`` operation that fails. This is to force the ``MongoClient`` + # to cache an access token. + with self.assertRaises(AutoReconnect): + client.test.test.find_one() + # Poison the cache of the client. + client.options.pool_options._credentials.cache.data.access_token = "bad" + # Reset the request count. + self.request_called = 0 + # Verify that a find succeeds. + client.test.test.find_one() + # Verify that the callback was called 1 time. + self.assertEqual(self.request_called, 1) + # Close the client. 
+ client.close() + + def test_3_2_authentication_failures_without_cached_tokens_returns_an_error(self): + # Create a MongoClient configured with retryReads=false and an OIDC callback that always returns invalid access tokens. + class CustomCallback(OIDCCallback): + count = 0 + + def fetch(self, a): + self.count += 1 + return OIDCCallbackResult(access_token="bad value") + + callback = CustomCallback() + client = self.create_client(request_cb=callback) + # Perform a ``find`` operation that fails. + with self.assertRaises(OperationFailure): + client.test.test.find_one() + # Verify that the callback was called 1 time. + self.assertEqual(callback.count, 1) + # Close the client. + client.close() + + def test_3_3_unexpected_error_code_does_not_clear_cache(self): + # Create a ``MongoClient`` with a human callback that returns a valid token + client = self.create_client() + + # Set a fail point for ``saslStart`` commands. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["saslStart"], "errorCode": 20}, + } + ): + # Perform a ``find`` operation that fails. + with self.assertRaises(OperationFailure): + client.test.test.find_one() + + # Assert that the callback has been called once. + self.assertEqual(self.request_called, 1) + + # Perform a ``find`` operation that succeeds. + client.test.test.find_one() + + # Assert that the callback has been called once. + self.assertEqual(self.request_called, 1) + + # Close the client. + client.close() + + def test_4_1_reauthentication_succeds(self): + # Create a ``MongoClient`` configured with a custom OIDC callback that + # implements the provider logic. + client = self.create_client() + + # Set a fail point for the find command. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a ``find`` operation that succeeds. 
+ client.test.test.find_one() + + # Verify that the callback was called 2 times (once during the connection + # handshake, and again during reauthentication). + self.assertEqual(self.request_called, 2) + + # Close the client. + client.close() + + def test_4_2_read_commands_fail_if_reauthentication_fails(self): + # Create a ``MongoClient`` whose OIDC callback returns one good token and then + # bad tokens after the first call. + get_token = self.get_token + + class CustomCallback(OIDCCallback): + count = 0 + + def fetch(self, _): + self.count += 1 + if self.count == 1: + access_token = get_token() + else: + access_token = "bad value" + return OIDCCallbackResult(access_token=access_token) + + callback = CustomCallback() + client = self.create_client(request_cb=callback) + + # Perform a read operation that succeeds. + client.test.test.find_one() + + # Set a fail point for the find command. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a ``find`` operation that fails. + with self.assertRaises(OperationFailure): + client.test.test.find_one() + + # Verify that the callback was called 2 times. + self.assertEqual(callback.count, 2) + + # Close the client. + client.close() + + def test_4_3_write_commands_fail_if_reauthentication_fails(self): + # Create a ``MongoClient`` whose OIDC callback returns one good token and then + # bad token after the first call. + get_token = self.get_token - client1 = MongoClient(self.uri_single, authmechanismproperties=props) - client2 = MongoClient(self.uri_single, authmechanismproperties=props) + class CustomCallback(OIDCCallback): + count = 0 + + def fetch(self, _): + self.count += 1 + if self.count == 1: + access_token = get_token() + else: + access_token = "bad value" + return OIDCCallbackResult(access_token=access_token) + + callback = CustomCallback() + client = self.create_client(request_cb=callback) + + # Perform an insert operation that succeeds. 
+ client.test.test.insert_one({}) + + # Set a fail point for the find command. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["insert"], "errorCode": 391}, + } + ): + # Perform a ``insert`` operation that fails. + with self.assertRaises(OperationFailure): + client.test.test.insert_one({}) + + # Verify that the callback was called 2 times. + self.assertEqual(callback.count, 2) + + # Close the client. + client.close() + + def test_5_1_azure_with_no_username(self): + if ENVIRON != "azure": + raise unittest.SkipTest("Test is only supported on Azure") + opts = parse_uri(self.uri_single)["options"] + resource = opts["authmechanismproperties"]["TOKEN_RESOURCE"] + + props = dict(TOKEN_RESOURCE=resource, ENVIRONMENT="azure") + client = self.create_client(authMechanismProperties=props) + client.test.test.find_one() + client.close() + + def test_5_2_azure_with_bad_username(self): + if ENVIRON != "azure": + raise unittest.SkipTest("Test is only supported on Azure") + + opts = parse_uri(self.uri_single)["options"] + token_aud = opts["authmechanismproperties"]["TOKEN_RESOURCE"] + + props = dict(TOKEN_RESOURCE=token_aud, ENVIRONMENT="azure") + client = self.create_client(username="bad", authmechanismproperties=props) + with self.assertRaises(ValueError): + client.test.test.find_one() + client.close() + + def test_speculative_auth_success(self): + client1 = self.create_client() + client1.test.test.find_one() + client2 = self.create_client() + + # Prime the cache of the second client. + client2.options.pool_options._credentials.cache.data = ( + client1.options.pool_options._credentials.cache.data + ) + + # Set a fail point for saslStart commands. + with self.fail_point( + { + "mode": {"times": 2}, + "data": {"failCommands": ["saslStart"], "errorCode": 18}, + } + ): + # Perform a find operation. + client2.test.test.find_one() + + # Close the clients. 
+ client2.close() + client1.close() + + def test_reauthentication_succeeds_multiple_connections(self): + client1 = self.create_client() + client2 = self.create_client() # Perform an insert operation. client1.test.test.insert_many([{"a": 1}, {"a": 1}]) diff --git a/test/client-side-encryption/spec/legacy/fle2v2-BypassQueryAnalysis.json b/test/client-side-encryption/spec/legacy/fle2v2-BypassQueryAnalysis.json index dcc3983ae0..9b28df2f9a 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-BypassQueryAnalysis.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-BypassQueryAnalysis.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Compact.json b/test/client-side-encryption/spec/legacy/fle2v2-Compact.json index e47c689bf0..27310cb59f 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Compact.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Compact.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection-OldServer.json b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection-OldServer.json index d5b04b3ea5..c266aa6b83 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection-OldServer.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection-OldServer.json @@ -55,6 +55,38 @@ "result": { "errorContains": "Driver support of Queryable Encryption is incompatible with server. Upgrade server to use Queryable Encryption." 
} + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } } ] } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json index cc8bd17145..c324be8abc 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-DecryptExistingData.json b/test/client-side-encryption/spec/legacy/fle2v2-DecryptExistingData.json index 905d3c9456..1fb4c1d1bc 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-DecryptExistingData.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-DecryptExistingData.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Delete.json index e4150eab8e..ddfe57b00c 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Delete.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", 
"topology": [ "replicaset", "sharded", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json index b579979e94..bdc5c99bc2 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json index 0a84d73650..8e0c6dafa3 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFieldsMap-defaults.json b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFieldsMap-defaults.json index 3e0905eadf..1c0a057cad 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFieldsMap-defaults.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFieldsMap-defaults.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-FindOneAndUpdate.json index 4606fbb930..c5e689a3de 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-FindOneAndUpdate.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": 
"7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Indexed.json b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Indexed.json index c7149d1f5c..6e156ffc60 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Indexed.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Indexed.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Unindexed.json b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Unindexed.json index 008b0c959f..48280f5bd4 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Unindexed.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Unindexed.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json b/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json index a072454112..1e655f0a9c 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-NoEncryption.json b/test/client-side-encryption/spec/legacy/fle2v2-NoEncryption.json index 185691d61c..a6843c4737 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-NoEncryption.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-NoEncryption.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Aggregate.json 
b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Aggregate.json index dea821bd1e..9eaabe0d71 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Aggregate.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Correctness.json index 9e4f525877..fa887e0892 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Correctness.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Correctness.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Delete.json index 7f4094f50c..cce4faf188 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Delete.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-FindOneAndUpdate.json index 5ec0601603..4392b67686 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-FindOneAndUpdate.json +++ 
b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-FindOneAndUpdate.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-InsertFind.json index efce1511c0..27ce7881df 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-InsertFind.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Update.json index 7f9fadcda4..f7d5a6af66 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Update.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Aggregate.json index fb129392b1..401ee34e3f 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Aggregate.json @@ -2,10 +2,10 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git 
a/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Correctness.json index 5120aecb7a..758d3e5732 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Correctness.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Correctness.json @@ -2,10 +2,10 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Delete.json index de81159b43..24a08f318c 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Delete.json @@ -2,10 +2,10 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json index 36cf91c88c..2a8070ecf9 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json @@ -2,10 +2,10 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-InsertFind.json index 6b5a642aa8..2ef63f42b9 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-InsertFind.json +++ 
b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-InsertFind.json @@ -2,10 +2,10 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Update.json index 8cfb7b525b..8064eb1b18 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Update.json @@ -2,10 +2,10 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json index 801beefe18..8cf143c094 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Correctness.json index b8a6953611..a4b06998f7 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Correctness.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Correctness.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + 
"maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Delete.json index 1abb59bfd1..fad8234838 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Delete.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json index 8d763431fa..fb8f4f4140 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json index 5407fba18b..79562802e6 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git 
a/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Update.json index e5d1a4e059..cc93b76948 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Update.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Aggregate.json index d8c9cacdcc..79f26660f2 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Aggregate.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Correctness.json index 65594bcb11..117e56af62 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Correctness.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Correctness.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Delete.json index 392e722f1f..40d8ed5bb2 100644 --- 
a/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Delete.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-FindOneAndUpdate.json index bbcfb321f5..f0893ce661 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-FindOneAndUpdate.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-InsertFind.json index 9f2c7c9911..d3dc2f830c 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-InsertFind.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Update.json index ce03576f88..9d6a1fbfdd 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Update.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", 
"topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Aggregate.json index b121c72f14..4188685a2c 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Aggregate.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Correctness.json index 6b42ecfe82..60f1ea7a33 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Correctness.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Correctness.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Delete.json index a5c397d0be..4ed591d3f8 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Delete.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git 
a/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json index b6df9463e8..d8fbbfae73 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-InsertFind.json index 1cea25545b..4213b066d1 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-InsertFind.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Update.json index 7703c9057d..89eb4c338d 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Update.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Aggregate.json 
b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Aggregate.json index 9c2536264d..686f0241ba 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Aggregate.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Correctness.json index 58ccf3efc8..2964624f22 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Correctness.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Correctness.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Delete.json index b20b2750bb..531b3e7590 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Delete.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-FindOneAndUpdate.json index f9c189ace9..402086cdb6 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-FindOneAndUpdate.json @@ -2,12 
+2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-InsertFind.json index 874d4760c8..965b8a5516 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-InsertFind.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Update.json index c2b62b4d1c..6cf44ac782 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Update.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Aggregate.json index afc0f97be1..6edb38a800 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Aggregate.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Correctness.json 
b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Correctness.json index cda941de8a..3d33f7381b 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Correctness.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Correctness.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Delete.json index ad344e21b4..1b32782010 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Delete.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-FindOneAndUpdate.json index d447200468..b8e3b888a8 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-FindOneAndUpdate.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-InsertFind.json index 4eb837f28b..d637fcf9e7 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-InsertFind.json +++ 
b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-InsertFind.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Update.json index 3ba7f17c14..1b76019a4c 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Update.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-WrongType.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-WrongType.json index e5e9ddc821..704a693b8f 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Range-WrongType.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-WrongType.json @@ -2,12 +2,12 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Update.json index 14104e2cd8..cb260edc0d 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Update.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/test/client-side-encryption/spec/legacy/fle2v2-validatorAndPartialFieldExpression.json 
b/test/client-side-encryption/spec/legacy/fle2v2-validatorAndPartialFieldExpression.json index 4adf6fc07d..901c4dd841 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-validatorAndPartialFieldExpression.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-validatorAndPartialFieldExpression.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "7.0.0", - "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/test/client-side-encryption/spec/legacy/getMore.json b/test/client-side-encryption/spec/legacy/getMore.json index ee99bf7537..94e788ef61 100644 --- a/test/client-side-encryption/spec/legacy/getMore.json +++ b/test/client-side-encryption/spec/legacy/getMore.json @@ -216,7 +216,10 @@ "command_started_event": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "default", "batchSize": 2 diff --git a/test/client-side-encryption/spec/legacy/namedKMS.json b/test/client-side-encryption/spec/legacy/namedKMS.json new file mode 100644 index 0000000000..c859443585 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/namedKMS.json @@ -0,0 +1,197 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "DX3iUuOlBsx6wBX9UZ3v/qXk1HNeBace2J+h/JwsDdF/vmSXLZ1l1VmZYIcpVFy6ODhdbzLjd4pNgg9wcm4etYig62KNkmtZ0/s1tAL5VsuW/s7/3PYnYGznZTFhLjIVcOH/RNoRj2eQb/sRTyivL85wePEpAU/JzuBj6qO9Y5txQgs1k0J3aNy10R9aQ8kC1NuSSpLAIXwE6DlNDDJXhw==", + "subType": "00" + } + }, 
+ "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local:name2" + } + } + ], + "tests": [ + { + "description": "Automatically encrypt and decrypt with a named KMS provider", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local:name2": { + "key": { + "$binary": { + "base64": "local+name2+YUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AZaHGpfp2pntvgAAAAAAAAAC07sFvTQ0I4O2U49hpr4HezaK44Ivluzv5ntQBTYHDlAJMLyRMyB6Dl+UGHBgqhHe/Xw+pcT9XdiUoOJYAx9g+w==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + } + ], + "outcome": 
{ + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AZaHGpfp2pntvgAAAAAAAAAC07sFvTQ0I4O2U49hpr4HezaK44Ivluzv5ntQBTYHDlAJMLyRMyB6Dl+UGHBgqhHe/Xw+pcT9XdiUoOJYAx9g+w==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/unified/namedKMS-createDataKey.json b/test/client-side-encryption/spec/unified/namedKMS-createDataKey.json new file mode 100644 index 0000000000..4d75e4cf51 --- /dev/null +++ b/test/client-side-encryption/spec/unified/namedKMS-createDataKey.json @@ -0,0 +1,396 @@ +{ + "description": "namedKMS-createDataKey", + "schemaVersion": "1.18", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws:name1": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure:name1": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp:name1": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip:name1": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local:name1": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [] + } + ], + "tests": [ + { + "description": "create data key with named AWS KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": 
"clientEncryption0", + "arguments": { + "kmsProvider": "aws:name1", + "opts": { + "masterKey": { + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with named Azure KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "azure:name1", + "opts": { + "masterKey": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with named GCP KMS provider", + "operations": [ + { + "name": 
"createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "gcp:name1", + "opts": { + "masterKey": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with named KMIP KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "kmip:name1" + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "kmip:name1", + "keyId": { + "$$type": "string" + } + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with named local KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local:name1" + }, + "expectResult": { + "$$type": 
"binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "local:name1" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/namedKMS-explicit.json b/test/client-side-encryption/spec/unified/namedKMS-explicit.json new file mode 100644 index 0000000000..e28d7e8b30 --- /dev/null +++ b/test/client-side-encryption/spec/unified/namedKMS-explicit.json @@ -0,0 +1,130 @@ +{ + "description": "namedKMS-explicit", + "schemaVersion": "1.18", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local:name2": { + "key": "local+name2+YUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + }, + "keyAltNames": [ + "local:name2" + ], + "keyMaterial": { + "$binary": { + "base64": 
"DX3iUuOlBsx6wBX9UZ3v/qXk1HNeBace2J+h/JwsDdF/vmSXLZ1l1VmZYIcpVFy6ODhdbzLjd4pNgg9wcm4etYig62KNkmtZ0/s1tAL5VsuW/s7/3PYnYGznZTFhLjIVcOH/RNoRj2eQb/sRTyivL85wePEpAU/JzuBj6qO9Y5txQgs1k0J3aNy10R9aQ8kC1NuSSpLAIXwE6DlNDDJXhw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local:name2" + } + } + ] + } + ], + "tests": [ + { + "description": "can explicitly encrypt with a named KMS provider", + "operations": [ + { + "name": "encrypt", + "object": "clientEncryption0", + "arguments": { + "value": "foobar", + "opts": { + "keyAltName": "local:name2", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "expectResult": { + "$binary": { + "base64": "AZaHGpfp2pntvgAAAAAAAAAC4yX2LTAuN253GAkEO2ZXp4GpCyM7yoVNJMQQl+6uzxMs03IprLC7DL2vr18x9LwOimjTS9YbMJhrnFkEPuNhbg==", + "subType": "06" + } + } + } + ] + }, + { + "description": "can explicitly decrypt with a named KMS provider", + "operations": [ + { + "name": "decrypt", + "object": "clientEncryption0", + "arguments": { + "value": { + "$binary": { + "base64": "AZaHGpfp2pntvgAAAAAAAAAC4yX2LTAuN253GAkEO2ZXp4GpCyM7yoVNJMQQl+6uzxMs03IprLC7DL2vr18x9LwOimjTS9YbMJhrnFkEPuNhbg==", + "subType": "06" + } + } + }, + "expectResult": "foobar" + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/namedKMS-rewrapManyDataKey.json b/test/client-side-encryption/spec/unified/namedKMS-rewrapManyDataKey.json new file mode 100644 index 0000000000..b3b9bd2477 --- /dev/null +++ b/test/client-side-encryption/spec/unified/namedKMS-rewrapManyDataKey.json @@ -0,0 +1,1385 @@ +{ + "description": "namedKMS-rewrapManyDataKey", + "schemaVersion": "1.18", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + 
"clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws:name1": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure:name1": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp:name1": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip:name1": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local:name1": { + "key": { + "$$placeholder": 1 + } + }, + "local:name2": { + "key": "local+name2+YUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + }, + "aws:name2": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws:name1_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, 
+ "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "YXp1cmVhenVyZWF6dXJlYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "azure:name1_key" + ], + "keyMaterial": { + "$binary": { + "base64": "pr01l7qDygUkFE/0peFwpnNlv3iIy8zrQK38Q9i12UCN2jwZHDmfyx8wokiIKMb9kAleeY+vnt3Cf1MKu9kcDmI+KxbNDd+V3ytAAGzOVLDJr77CiWjF9f8ntkXRHrAY9WwnVDANYkDwXlyU0Y2GQFTiW65jiQhUtYLYH63Tk48SsJuQvnWw1Q+PzY8ga+QeVec8wbcThwtm+r2IHsCFnc72Gv73qq7weISw+O4mN08z3wOp5FOS2ZM3MK7tBGmPdBcktW7F8ODGsOQ1FU53OrWUnyX2aTi2ftFFFMWVHqQo7EYuBZHru8RRODNKMyQk0BFfKovAeTAVRv9WH9QU7g==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "Z2NwZ2NwZ2NwZ2NwZ2NwZw==", + "subType": "04" + } + }, + "keyAltNames": [ + "gcp:name1_key" + ], + "keyMaterial": { + "$binary": { + "base64": "CiQAIgLj0USbQtof/pYRLQO96yg/JEtZbD1UxKueaC37yzT5tTkSiQEAhClWB5ZCSgzHgxv8raWjNB4r7e8ePGdsmSuYTYmLC5oHHS/BdQisConzNKFaobEQZHamTCjyhy5NotKF8MWoo+dyfQApwI29+vAGyrUIQCXzKwRnNdNQ+lb3vJtS5bqvLTvSxKHpVca2kqyC9nhonV+u4qru5Q2bAqUgVFc8fL4pBuvlowZFTQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "a21pcGttaXBrbWlwa21pcA==", + "subType": "04" + } + }, + "keyAltNames": [ + "kmip:name1_key" + ], + "keyMaterial": { + "$binary": { + 
"base64": "CklVctHzke4mcytd0TxGqvepkdkQN8NUF4+jV7aZQITAKdz6WjdDpq3lMt9nSzWGG2vAEfvRb3mFEVjV57qqGqxjq2751gmiMRHXz0btStbIK3mQ5xbY9kdye4tsixlCryEwQONr96gwlwKKI9Nubl9/8+uRF6tgYjje7Q7OjauEf1SrJwKcoQ3WwnjZmEqAug0kImCpJ/irhdqPzivRiA==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "kmip:name1", + "keyId": "1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local:name1_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local:name1" + } + } + ] + } + ], + "tests": [ + { + "description": "rewrap to aws:name1", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "aws:name1_key" + } + }, + "opts": { + "provider": "aws:name1", + "masterKey": { + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": 
"aws:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } 
+ } + } + } + ] + } + ] + }, + { + "description": "rewrap to azure:name1", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "azure:name1_key" + } + }, + "opts": { + "provider": "azure:name1", + "masterKey": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "azure:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": 
"azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap to gcp:name1", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "gcp:name1_key" + } + }, + "opts": { + "provider": "gcp:name1", + "masterKey": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "gcp:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp:name1", + "projectId": 
"devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap to kmip:name1", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "kmip:name1_key" + } + }, + "opts": { + "provider": "kmip:name1" + } + }, + "expectResult": { + 
"bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "kmip:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip:name1", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip:name1", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip:name1", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip:name1", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + 
"$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap to local:name1", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "local:name1_key" + } + }, + "opts": { + "provider": "local:name1" + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "local:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local:name1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local:name1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local:name1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, 
+ "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local:name1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap from local:name1 to local:name2", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$eq": "local:name1_key" + } + }, + "opts": { + "provider": "local:name2" + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$eq": "local:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local:name2" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap from aws:name1 to aws:name2", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$eq": 
"aws:name1_key" + } + }, + "opts": { + "provider": "aws:name2", + "masterKey": { + "key": "arn:aws:kms:us-east-1:857654397073:key/0f8468f0-f135-4226-aa0b-bd05c4c30df5", + "region": "us-east-1" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$eq": "aws:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws:name2", + "key": "arn:aws:kms:us-east-1:857654397073:key/0f8468f0-f135-4226-aa0b-bd05c4c30df5", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/command.json b/test/command_logging/command.json new file mode 100644 index 0000000000..d2970df692 --- /dev/null +++ b/test/command_logging/command.json @@ -0,0 +1,215 @@ +{ + "description": "command-logging", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "logging-tests-collection" + } + } + ], 
+ "initialData": [ + { + "collectionName": "logging-tests-collection", + "databaseName": "logging-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful command", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "ping", + "command": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ping": 1, + "$db": "logging-tests" + } + } + }, + "requestId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "databaseName": "logging-tests", + "commandName": "ping", + "reply": { + "$$type": "string" + }, + "requestId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "A failed command", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "find", + "command": { + "$$type": "string" + }, + "requestId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": 
"debug", + "component": "command", + "data": { + "message": "Command failed", + "databaseName": "logging-tests", + "commandName": "find", + "failure": { + "$$exists": true + }, + "requestId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/driver-connection-id.json b/test/command_logging/driver-connection-id.json new file mode 100644 index 0000000000..40db98d6fa --- /dev/null +++ b/test/command_logging/driver-connection-id.json @@ -0,0 +1,146 @@ +{ + "description": "driver-connection-id", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "logging-tests-collection" + } + } + ], + "initialData": [ + { + "collectionName": "logging-tests-collection", + "databaseName": "logging-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful command", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "ping", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "ping", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { 
+ "description": "A failed command", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "find", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command failed", + "commandName": "find", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/no-handshake-messages.json b/test/command_logging/no-handshake-messages.json new file mode 100644 index 0000000000..a61e208798 --- /dev/null +++ b/test/command_logging/no-handshake-messages.json @@ -0,0 +1,94 @@ +{ + "description": "no-handshake-command-logs", + "schemaVersion": "1.13", + "tests": [ + { + "description": "Handshake commands should not generate log messages", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + }, + "observeEvents": [ + "connectionCreatedEvent", + "connectionReadyEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "connectionCreatedEvent": {} + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "connectionReadyEvent": {} + }, + "count": 1 + } + } 
+ ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "ping" + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "ping" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/no-heartbeat-messages.json b/test/command_logging/no-heartbeat-messages.json new file mode 100644 index 0000000000..525be9171d --- /dev/null +++ b/test/command_logging/no-heartbeat-messages.json @@ -0,0 +1,91 @@ +{ + "description": "no-heartbeat-command-logs", + "schemaVersion": "1.13", + "runOnRequirements": [ + { + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "tests": [ + { + "description": "Heartbeat commands should not generate log messages", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + }, + "observeEvents": [ + "serverDescriptionChangedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": {} + }, + "count": 1 + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "ping" + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "ping" + } + } + ] + } + ] + } + ] +} diff --git 
a/test/command_logging/operation-id.json b/test/command_logging/operation-id.json new file mode 100644 index 0000000000..b1a3cec3d9 --- /dev/null +++ b/test/command_logging/operation-id.json @@ -0,0 +1,198 @@ +{ + "description": "operation-id", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "logging-tests-collection" + } + } + ], + "initialData": [ + { + "collectionName": "logging-tests-collection", + "databaseName": "logging-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "Successful bulk write command log messages include operationIds", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "x": 1 + } + } + }, + { + "deleteOne": { + "filter": { + "x": 1 + } + } + } + ] + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "insert", + "operationId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "insert", + "operationId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "delete", + "operationId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "delete", + "operationId": { + "$$type": [ + "int", 
+ "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "Failed bulk write command log message includes operationId", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "x": 1 + }, + "update": [ + { + "$invalidOperator": true + } + ] + } + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "update", + "operationId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command failed", + "commandName": "update", + "operationId": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/pre-42-server-connection-id.json b/test/command_logging/pre-42-server-connection-id.json new file mode 100644 index 0000000000..d5ebd86590 --- /dev/null +++ b/test/command_logging/pre-42-server-connection-id.json @@ -0,0 +1,119 @@ +{ + "description": "pre-42-server-connection-id", + "schemaVersion": "1.13", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-server-connection-id-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "logging-tests-collection" + } + } + ], + "initialData": [ + { + "databaseName": "logging-server-connection-id-tests", + "collectionName": "logging-tests-collection", + "documents": [] + } + ], + "tests": [ + { + "description": "command log messages do not include server connection id", + "operations": [ + { + "name": "insertOne", + "object": 
"collection", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "commandName": "insert", + "serverConnectionId": { + "$$exists": false + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "insert", + "serverConnectionId": { + "$$exists": false + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "commandName": "find", + "serverConnectionId": { + "$$exists": false + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command failed", + "commandName": "find", + "serverConnectionId": { + "$$exists": false + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/redacted-commands.json b/test/command_logging/redacted-commands.json new file mode 100644 index 0000000000..43b9ff74f2 --- /dev/null +++ b/test/command_logging/redacted-commands.json @@ -0,0 +1,1438 @@ +{ + "description": "redacted-commands", + "schemaVersion": "1.13", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "auth": false + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeLogMessages": { + "command": "debug" + } + } + }, + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-redaction-tests" + } + } + ], + "tests": [ + { + "description": "authenticate command and resulting server-generated error are redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "authenticate", + 
"command": { + "authenticate": 1, + "mechanism": "MONGODB-X509", + "user": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", + "db": "$external" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "authenticate", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "authenticate", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to authenticate is not redacted", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "authenticate" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "authenticate", + "command": { + "authenticate": 1, + "mechanism": "MONGODB-X509", + "user": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "authenticate", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "authenticate", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "saslStart command and 
resulting server-generated error are redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "saslStart", + "command": { + "saslStart": 1, + "payload": "definitely-invalid-payload", + "db": "admin" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "saslStart", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "saslStart", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to saslStart is not redacted", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslStart" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "saslStart", + "command": { + "saslStart": 1, + "payload": "ZmFrZXNhc2xwYXlsb2Fk", + "mechanism": "MONGODB-X509" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "saslStart", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "saslStart", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": 
"saslContinue command and resulting server-generated error are redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "saslContinue", + "command": { + "saslContinue": 1, + "conversationId": 0, + "payload": "definitely-invalid-payload" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "saslContinue", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "saslContinue", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to saslContinue is not redacted", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "saslContinue", + "command": { + "saslContinue": 1, + "conversationId": 0, + "payload": "ZmFrZXNhc2xwYXlsb2Fk" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "saslContinue", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "saslContinue", + "failure": { + "$$exists": 
true + } + } + } + ] + } + ] + }, + { + "description": "getnonce command and server reply are redacted", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "getnonce", + "command": { + "getnonce": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "getnonce", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "getnonce", + "reply": { + "$$matchAsDocument": {} + } + } + } + ] + } + ] + }, + { + "description": "network error in response to getnonce is not redacted", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getnonce" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "getnonce", + "command": { + "getnonce": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "getnonce", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "getnonce", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "createUser 
command and resulting server-generated error are redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "createUser", + "command": { + "createUser": "private", + "pwd": {}, + "roles": [] + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "createUser", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "createUser", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to createUser is not redacted", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createUser" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "createUser", + "command": { + "createUser": "private", + "pwd": "pwd", + "roles": [] + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "createUser", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "createUser", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "updateUser command and resulting 
server-generated error are redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "updateUser", + "command": { + "updateUser": "private", + "pwd": {}, + "roles": [] + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "updateUser", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "updateUser", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to updateUser is not redacted", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "updateUser" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "updateUser", + "command": { + "updateUser": "private", + "pwd": "pwd", + "roles": [] + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "updateUser", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "updateUser", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "copydbgetnonce command and resulting server-generated 
error are redacted", + "runOnRequirements": [ + { + "maxServerVersion": "3.6.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydbgetnonce", + "command": { + "copydbgetnonce": "private" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "copydbgetnonce", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "copydbgetnonce", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to copydbgetnonce is not redacted", + "runOnRequirements": [ + { + "maxServerVersion": "3.6.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "copydbgetnonce" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydbgetnonce", + "command": { + "copydbgetnonce": "private" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "copydbgetnonce", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "copydbgetnonce", + "failure": { + "$$exists": true + } 
+ } + } + ] + } + ] + }, + { + "description": "copydbsaslstart command and resulting server-generated error are redacted", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydbsaslstart", + "command": { + "copydbsaslstart": "private" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "copydbsaslstart", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "copydbsaslstart", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to copydbsaslstart is not redacted", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "copydbsaslstart" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydbsaslstart", + "command": { + "copydbsaslstart": "private" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "copydbsaslstart", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, +
"data": { + "message": "Command failed", + "commandName": "copydbsaslstart", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "copydb command and resulting server-generated error are redacted", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydb", + "command": { + "copydb": "private" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "copydb", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "copydb", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to copydb is not redacted", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "copydb" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydb", + "command": { + "copydb": "private" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "copydb", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", +
"failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "copydb", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "hello with speculative authenticate command and server reply are redacted", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "hello", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "hello", + "reply": { + "$$matchAsDocument": {} + } + } + } + ] + } + ] + }, + { + "description": "legacy hello with speculative authenticate command and server reply are redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "ismaster", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "ismaster", + "reply": { + "$$matchAsDocument": {} + } + } + }, + { 
+ "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "isMaster", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "isMaster", + "reply": { + "$$matchAsDocument": {} + } + } + } + ] + } + ] + }, + { + "description": "hello without speculative authenticate command and server reply are not redacted", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "hello", + "command": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "hello": 1 + } + } + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "hello", + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1, + "isWritablePrimary": true + } + } + } + } + } + ] + } + ] + }, + { + "description": "legacy hello without speculative authenticate command and server reply are not redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "ismaster", + "command": { + 
"$$matchAsDocument": { + "$$matchAsRoot": { + "ismaster": 1 + } + } + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "ismaster", + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1, + "ismaster": true + } + } + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "isMaster", + "command": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "isMaster": 1 + } + } + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "isMaster", + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1, + "ismaster": true + } + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/server-connection-id.json b/test/command_logging/server-connection-id.json new file mode 100644 index 0000000000..abbbbc7442 --- /dev/null +++ b/test/command_logging/server-connection-id.json @@ -0,0 +1,131 @@ +{ + "description": "server-connection-id", + "schemaVersion": "1.13", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-server-connection-id-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "logging-tests-collection" + } + } + ], + "initialData": [ + { + "databaseName": "logging-server-connection-id-tests", + "collectionName": "logging-tests-collection", + "documents": [] + } + ], + "tests": [ + { + "description": "command log messages include server connection id", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "find", + "object": 
"collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "commandName": "insert", + "serverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "insert", + "serverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "commandName": "find", + "serverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command failed", + "commandName": "find", + "serverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/service-id.json b/test/command_logging/service-id.json new file mode 100644 index 0000000000..ea39d61231 --- /dev/null +++ b/test/command_logging/service-id.json @@ -0,0 +1,207 @@ +{ + "description": "service-id", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-server-connection-id-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "logging-tests-collection" + } + } + ], + "initialData": [ + { + "databaseName": "logging-server-connection-id-tests", + "collectionName": "logging-tests-collection", + "documents": [] + } + ], + "tests": [ + { + "description": "command log messages include serviceId when in LB mode", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "operations": [ + { + "name": 
"insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "commandName": "insert", + "serviceId": { + "$$type": "string" + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "insert", + "serviceId": { + "$$type": "string" + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "commandName": "find", + "serviceId": { + "$$type": "string" + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command failed", + "commandName": "find", + "serviceId": { + "$$type": "string" + } + } + } + ] + } + ] + }, + { + "description": "command log messages omit serviceId when not in LB mode", + "runOnRequirements": [ + { + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "commandName": "insert", + "serviceId": { + "$$exists": false + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "insert", + "serviceId": { + "$$exists": false + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "commandName": 
"find", + "serviceId": { + "$$exists": false + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command failed", + "commandName": "find", + "serviceId": { + "$$exists": false + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/unacknowledged-write.json b/test/command_logging/unacknowledged-write.json new file mode 100644 index 0000000000..dad0c0a36a --- /dev/null +++ b/test/command_logging/unacknowledged-write.json @@ -0,0 +1,150 @@ +{ + "description": "unacknowledged-write", + "schemaVersion": "1.16", + "createEntities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "logging-tests-collection", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "logging-tests-collection", + "databaseName": "logging-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "An unacknowledged write generates a succeeded log message with ok: 1 reply", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "insert", + "command": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "insert": "logging-tests-collection", + "$db": "logging-tests" + } + } + }, + "requestId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { 
+ "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "insert", + "reply": { + "$$matchAsDocument": { + "ok": 1 + } + }, + "requestId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/pre-42-server-connection-id.json b/test/command_monitoring/pre-42-server-connection-id.json new file mode 100644 index 0000000000..141fbe584f --- /dev/null +++ b/test/command_monitoring/pre-42-server-connection-id.json @@ -0,0 +1,101 @@ +{ + "description": "pre-42-server-connection-id", + "schemaVersion": "1.6", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "server-connection-id-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "server-connection-id-tests", + "collectionName": "coll", + "documents": [] + } + ], + "tests": [ + { + "description": "command events do not include server connection id", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "hasServerConnectionId": false + } + 
}, + { + "commandSucceededEvent": { + "commandName": "insert", + "hasServerConnectionId": false + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "hasServerConnectionId": false + } + }, + { + "commandFailedEvent": { + "commandName": "find", + "hasServerConnectionId": false + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/server-connection-id.json b/test/command_monitoring/server-connection-id.json new file mode 100644 index 0000000000..a8f27637fc --- /dev/null +++ b/test/command_monitoring/server-connection-id.json @@ -0,0 +1,101 @@ +{ + "description": "server-connection-id", + "schemaVersion": "1.6", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "server-connection-id-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "server-connection-id-tests", + "collectionName": "coll", + "documents": [] + } + ], + "tests": [ + { + "description": "command events include server connection id", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "hasServerConnectionId": true + } + }, + { + "commandSucceededEvent": { + "commandName": "insert", + "hasServerConnectionId": true + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "hasServerConnectionId": true + } + }, + { + "commandFailedEvent": { + "commandName": 
"find", + "hasServerConnectionId": true + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/unacknowledgedBulkWrite.json b/test/command_monitoring/unacknowledgedBulkWrite.json index 4c16d6df11..782cb84a5b 100644 --- a/test/command_monitoring/unacknowledgedBulkWrite.json +++ b/test/command_monitoring/unacknowledgedBulkWrite.json @@ -1,6 +1,6 @@ { "description": "unacknowledgedBulkWrite", - "schemaVersion": "1.0", + "schemaVersion": "1.7", "createEntities": [ { "client": { @@ -64,11 +64,29 @@ ], "ordered": false } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": "unorderedBulkWriteInsertW0", + "x": 44 + } + ] } ], "expectEvents": [ { "client": "client", + "ignoreExtraEvents": true, "events": [ { "commandStartedEvent": { diff --git a/test/command_monitoring/writeConcernError.json b/test/command_monitoring/writeConcernError.json index 7bc16f2ab7..1bfae9f951 100644 --- a/test/command_monitoring/writeConcernError.json +++ b/test/command_monitoring/writeConcernError.json @@ -3,7 +3,7 @@ "schemaVersion": "1.4", "runOnRequirements": [ { - "minServerVersion": "4.1.0", + "minServerVersion": "4.3.1", "topologies": [ "replicaset" ], @@ -66,11 +66,11 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { - "code": 91, - "errorLabels": [ - "RetryableWriteError" - ] + "code": 91 } } } @@ -112,11 +112,11 @@ "reply": { "ok": 1, "n": 1, + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { - "code": 91, - "errorLabels": [ - "RetryableWriteError" - ] + "code": 91 } }, "commandName": "insert" @@ -152,4 +152,4 @@ ] } ] -} +} \ No newline at end of file diff --git a/test/connection_logging/connection-logging.json b/test/connection_logging/connection-logging.json new file mode 100644 index 0000000000..bfbdbe8639 --- /dev/null +++ b/test/connection_logging/connection-logging.json @@ -0,0 +1,523 @@ +{ + 
"description": "connection-logging", + "schemaVersion": "1.13", + "runOnRequirements": [ + { + "topologies": [ + "single" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient" + } + } + ], + "tests": [ + { + "description": "Create a client, run a command, and close the client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "connection": "debug" + } + } + } + ] + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool created", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checkout started", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection created", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection ready", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] 
+ } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checked out", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checked in", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checkout started", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checked out", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checked in", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection closed", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "reason": "Connection pool was closed" + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool closed", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ 
+ "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "Connection checkout fails due to error establishing connection", + "runOnRequirements": [ + { + "auth": true, + "minServerVersion": "4.0" + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "retryReads": false, + "appname": "clientAppName", + "heartbeatFrequencyMS": 10000 + }, + "observeLogMessages": { + "connection": "debug" + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "closeConnection": true, + "appName": "clientAppName" + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool created", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checkout started", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection created", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + 
}, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection closed", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "reason": "An error occurred while using the connection", + "error": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checkout failed", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "reason": "An error occurred while trying to establish a new connection", + "error": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool cleared", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/test/connection_logging/connection-pool-options.json b/test/connection_logging/connection-pool-options.json new file mode 100644 index 0000000000..7055a54869 --- /dev/null +++ b/test/connection_logging/connection-pool-options.json @@ -0,0 +1,458 @@ +{ + "description": "connection-pool-options", + "schemaVersion": "1.13", + "runOnRequirements": [ + { + "topologies": [ + "single" + ] + } + ], + "tests": [ + { + "description": "Options should be included in connection pool created message when specified", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "connectionReadyEvent" + ], + "observeLogMessages": { + "connection": "debug" + }, + "uriOptions": { + "minPoolSize": 1, + "maxPoolSize": 5, + "maxIdleTimeMS": 10000 + } + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { 
+ "client": "client", + "event": { + "connectionReadyEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool created", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "minPoolSize": 1, + "maxPoolSize": 5, + "maxIdleTimeMS": 10000 + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection created", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection ready", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "maxConnecting should be included in connection pool created message when specified", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "poolReadyEvent" + ], + "observeLogMessages": { + "connection": "debug" + }, + "uriOptions": { + "maxConnecting": 5 + } + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + 
"data": { + "message": "Connection pool created", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "maxConnecting": 5 + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "waitQueueTimeoutMS should be included in connection pool created message when specified", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "poolReadyEvent" + ], + "observeLogMessages": { + "connection": "debug" + }, + "uriOptions": { + "waitQueueTimeoutMS": 10000 + } + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool created", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "waitQueueTimeoutMS": 10000 + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "waitQueueSize should be included in connection pool created message when specified", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "poolReadyEvent" + ], + "observeLogMessages": { + "connection": "debug" + }, + "uriOptions": { + "waitQueueSize": 100 + } + } + } + ] + } + }, + { + "name": "waitForEvent", + 
"object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool created", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "waitQueueSize": 100 + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "waitQueueMultiple should be included in connection pool created message when specified", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "poolReadyEvent" + ], + "observeLogMessages": { + "connection": "debug" + }, + "uriOptions": { + "waitQueueSize": 5 + } + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool created", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "waitQueueMultiple": 5 + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/cmap/connection-must-have-id.json b/test/connection_monitoring/connection-must-have-id.json similarity index 100% rename from test/cmap/connection-must-have-id.json rename to 
test/connection_monitoring/connection-must-have-id.json diff --git a/test/cmap/connection-must-order-ids.json b/test/connection_monitoring/connection-must-order-ids.json similarity index 100% rename from test/cmap/connection-must-order-ids.json rename to test/connection_monitoring/connection-must-order-ids.json diff --git a/test/cmap/pool-checkin-destroy-closed.json b/test/connection_monitoring/pool-checkin-destroy-closed.json similarity index 100% rename from test/cmap/pool-checkin-destroy-closed.json rename to test/connection_monitoring/pool-checkin-destroy-closed.json diff --git a/test/cmap/pool-checkin-destroy-stale.json b/test/connection_monitoring/pool-checkin-destroy-stale.json similarity index 100% rename from test/cmap/pool-checkin-destroy-stale.json rename to test/connection_monitoring/pool-checkin-destroy-stale.json diff --git a/test/cmap/pool-checkin-make-available.json b/test/connection_monitoring/pool-checkin-make-available.json similarity index 100% rename from test/cmap/pool-checkin-make-available.json rename to test/connection_monitoring/pool-checkin-make-available.json diff --git a/test/cmap/pool-checkin.json b/test/connection_monitoring/pool-checkin.json similarity index 100% rename from test/cmap/pool-checkin.json rename to test/connection_monitoring/pool-checkin.json diff --git a/test/cmap/pool-checkout-connection.json b/test/connection_monitoring/pool-checkout-connection.json similarity index 100% rename from test/cmap/pool-checkout-connection.json rename to test/connection_monitoring/pool-checkout-connection.json diff --git a/test/cmap/pool-checkout-custom-maxConnecting-is-enforced.json b/test/connection_monitoring/pool-checkout-custom-maxConnecting-is-enforced.json similarity index 100% rename from test/cmap/pool-checkout-custom-maxConnecting-is-enforced.json rename to test/connection_monitoring/pool-checkout-custom-maxConnecting-is-enforced.json diff --git a/test/cmap/pool-checkout-error-closed.json 
b/test/connection_monitoring/pool-checkout-error-closed.json similarity index 100% rename from test/cmap/pool-checkout-error-closed.json rename to test/connection_monitoring/pool-checkout-error-closed.json diff --git a/test/cmap/pool-checkout-maxConnecting-is-enforced.json b/test/connection_monitoring/pool-checkout-maxConnecting-is-enforced.json similarity index 97% rename from test/cmap/pool-checkout-maxConnecting-is-enforced.json rename to test/connection_monitoring/pool-checkout-maxConnecting-is-enforced.json index 732478bf7e..3a63818bfe 100644 --- a/test/cmap/pool-checkout-maxConnecting-is-enforced.json +++ b/test/connection_monitoring/pool-checkout-maxConnecting-is-enforced.json @@ -19,7 +19,7 @@ ], "closeConnection": false, "blockConnection": true, - "blockTimeMS": 750 + "blockTimeMS": 800 } }, "poolOptions": { @@ -53,7 +53,7 @@ }, { "name": "wait", - "ms": 100 + "ms": 400 }, { "name": "checkOut", diff --git a/test/cmap/pool-checkout-maxConnecting-timeout.json b/test/connection_monitoring/pool-checkout-maxConnecting-timeout.json similarity index 100% rename from test/cmap/pool-checkout-maxConnecting-timeout.json rename to test/connection_monitoring/pool-checkout-maxConnecting-timeout.json diff --git a/test/cmap/pool-checkout-minPoolSize-connection-maxConnecting.json b/test/connection_monitoring/pool-checkout-minPoolSize-connection-maxConnecting.json similarity index 100% rename from test/cmap/pool-checkout-minPoolSize-connection-maxConnecting.json rename to test/connection_monitoring/pool-checkout-minPoolSize-connection-maxConnecting.json diff --git a/test/cmap/pool-checkout-multiple.json b/test/connection_monitoring/pool-checkout-multiple.json similarity index 100% rename from test/cmap/pool-checkout-multiple.json rename to test/connection_monitoring/pool-checkout-multiple.json diff --git a/test/cmap/pool-checkout-no-idle.json b/test/connection_monitoring/pool-checkout-no-idle.json similarity index 100% rename from test/cmap/pool-checkout-no-idle.json rename 
to test/connection_monitoring/pool-checkout-no-idle.json diff --git a/test/cmap/pool-checkout-no-stale.json b/test/connection_monitoring/pool-checkout-no-stale.json similarity index 100% rename from test/cmap/pool-checkout-no-stale.json rename to test/connection_monitoring/pool-checkout-no-stale.json diff --git a/test/cmap/pool-checkout-returned-connection-maxConnecting.json b/test/connection_monitoring/pool-checkout-returned-connection-maxConnecting.json similarity index 100% rename from test/cmap/pool-checkout-returned-connection-maxConnecting.json rename to test/connection_monitoring/pool-checkout-returned-connection-maxConnecting.json diff --git a/test/cmap/pool-clear-clears-waitqueue.json b/test/connection_monitoring/pool-clear-clears-waitqueue.json similarity index 100% rename from test/cmap/pool-clear-clears-waitqueue.json rename to test/connection_monitoring/pool-clear-clears-waitqueue.json diff --git a/test/connection_monitoring/pool-clear-interrupting-pending-connections.json b/test/connection_monitoring/pool-clear-interrupting-pending-connections.json new file mode 100644 index 0000000000..ceae07a1c7 --- /dev/null +++ b/test/connection_monitoring/pool-clear-interrupting-pending-connections.json @@ -0,0 +1,77 @@ +{ + "version": 1, + "style": "integration", + "description": "clear with interruptInUseConnections = true closes pending connections", + "runOn": [ + { + "minServerVersion": "4.9.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 1000 + } + }, + "poolOptions": { + "minPoolSize": 0 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 1 + }, + { + "name": "clear", + "interruptInUseConnections": true + }, + { + 
"name": "waitForEvent", + "event": "ConnectionCheckOutFailed", + "count": 1 + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCreated" + }, + { + "type": "ConnectionPoolCleared", + "interruptInUseConnections": true + }, + { + "type": "ConnectionClosed" + }, + { + "type": "ConnectionCheckOutFailed" + } + ], + "ignore": [ + "ConnectionCheckedIn", + "ConnectionCheckedOut", + "ConnectionPoolCreated", + "ConnectionPoolReady" + ] +} diff --git a/test/cmap/pool-clear-min-size.json b/test/connection_monitoring/pool-clear-min-size.json similarity index 100% rename from test/cmap/pool-clear-min-size.json rename to test/connection_monitoring/pool-clear-min-size.json diff --git a/test/cmap/pool-clear-paused.json b/test/connection_monitoring/pool-clear-paused.json similarity index 100% rename from test/cmap/pool-clear-paused.json rename to test/connection_monitoring/pool-clear-paused.json diff --git a/test/cmap/pool-clear-ready.json b/test/connection_monitoring/pool-clear-ready.json similarity index 100% rename from test/cmap/pool-clear-ready.json rename to test/connection_monitoring/pool-clear-ready.json diff --git a/test/connection_monitoring/pool-clear-schedule-run-interruptInUseConnections-false.json b/test/connection_monitoring/pool-clear-schedule-run-interruptInUseConnections-false.json new file mode 100644 index 0000000000..3d7536951d --- /dev/null +++ b/test/connection_monitoring/pool-clear-schedule-run-interruptInUseConnections-false.json @@ -0,0 +1,81 @@ +{ + "version": 1, + "style": "unit", + "description": "Pool clear SHOULD schedule the next background thread run immediately (interruptInUseConnections = false)", + "poolOptions": { + "backgroundThreadIntervalMS": 10000 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut" + }, + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkIn", + "connection": "conn" + }, + { + "name": "clear", + "interruptInUseConnections": false + }, + { + 
"name": "waitForEvent", + "event": "ConnectionPoolCleared", + "count": 1, + "timeout": 1000 + }, + { + "name": "waitForEvent", + "event": "ConnectionClosed", + "count": 1, + "timeout": 1000 + }, + { + "name": "close" + } + ], + "events": [ + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 2, + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 2, + "address": 42 + }, + { + "type": "ConnectionPoolCleared", + "interruptInUseConnections": false + }, + { + "type": "ConnectionClosed", + "connectionId": 2, + "reason": "stale", + "address": 42 + }, + { + "type": "ConnectionPoolClosed", + "address": 42 + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionPoolReady", + "ConnectionReady", + "ConnectionCheckOutStarted", + "ConnectionPoolCreated" + ] +} diff --git a/test/cmap/pool-close-destroy-conns.json b/test/connection_monitoring/pool-close-destroy-conns.json similarity index 100% rename from test/cmap/pool-close-destroy-conns.json rename to test/connection_monitoring/pool-close-destroy-conns.json diff --git a/test/cmap/pool-close.json b/test/connection_monitoring/pool-close.json similarity index 100% rename from test/cmap/pool-close.json rename to test/connection_monitoring/pool-close.json diff --git a/test/cmap/pool-create-max-size.json b/test/connection_monitoring/pool-create-max-size.json similarity index 100% rename from test/cmap/pool-create-max-size.json rename to test/connection_monitoring/pool-create-max-size.json diff --git a/test/cmap/pool-create-min-size-error.json b/test/connection_monitoring/pool-create-min-size-error.json similarity index 100% rename from test/cmap/pool-create-min-size-error.json rename to test/connection_monitoring/pool-create-min-size-error.json diff --git a/test/cmap/pool-create-min-size.json b/test/connection_monitoring/pool-create-min-size.json similarity index 100% rename from test/cmap/pool-create-min-size.json rename to 
test/connection_monitoring/pool-create-min-size.json diff --git a/test/cmap/pool-create-with-options.json b/test/connection_monitoring/pool-create-with-options.json similarity index 100% rename from test/cmap/pool-create-with-options.json rename to test/connection_monitoring/pool-create-with-options.json diff --git a/test/cmap/pool-create.json b/test/connection_monitoring/pool-create.json similarity index 100% rename from test/cmap/pool-create.json rename to test/connection_monitoring/pool-create.json diff --git a/test/cmap/pool-ready-ready.json b/test/connection_monitoring/pool-ready-ready.json similarity index 100% rename from test/cmap/pool-ready-ready.json rename to test/connection_monitoring/pool-ready-ready.json diff --git a/test/cmap/pool-ready.json b/test/connection_monitoring/pool-ready.json similarity index 100% rename from test/cmap/pool-ready.json rename to test/connection_monitoring/pool-ready.json diff --git a/test/cmap/wait-queue-timeout.json b/test/connection_monitoring/wait-queue-timeout.json similarity index 100% rename from test/cmap/wait-queue-timeout.json rename to test/connection_monitoring/wait-queue-timeout.json diff --git a/test/connection_string/test/valid-options.json b/test/connection_string/test/valid-options.json index 01bc2264bb..3c79fe7ae5 100644 --- a/test/connection_string/test/valid-options.json +++ b/test/connection_string/test/valid-options.json @@ -37,6 +37,25 @@ "options": { "tls": true } + }, + { + "description": "Colon in a key value pair", + "uri": "mongodb://example.com/?authMechanism=MONGODB-OIDC&authMechanismProperties=TOKEN_RESOURCE:mongodb://test-cluster", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "example.com", + "port": null + } + ], + "auth": null, + "options": { + "authmechanismProperties": { + "TOKEN_RESOURCE": "mongodb://test-cluster" + } + } } ] } diff --git a/test/connection_string/test/valid-unix_socket-absolute.json 
b/test/connection_string/test/valid-unix_socket-absolute.json index 5bb02476eb..66491db13b 100644 --- a/test/connection_string/test/valid-unix_socket-absolute.json +++ b/test/connection_string/test/valid-unix_socket-absolute.json @@ -30,6 +30,21 @@ "auth": null, "options": null }, + { + "description": "Unix domain socket (mixed case)", + "uri": "mongodb://%2Ftmp%2FMongoDB-27017.sock", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "/tmp/MongoDB-27017.sock", + "port": null + } + ], + "auth": null, + "options": null + }, { "description": "Unix domain socket (absolute path with spaces in path)", "uri": "mongodb://%2Ftmp%2F %2Fmongodb-27017.sock", diff --git a/test/connection_string/test/valid-unix_socket-relative.json b/test/connection_string/test/valid-unix_socket-relative.json index 2ce649ffc2..788720920b 100644 --- a/test/connection_string/test/valid-unix_socket-relative.json +++ b/test/connection_string/test/valid-unix_socket-relative.json @@ -30,6 +30,21 @@ "auth": null, "options": null }, + { + "description": "Unix domain socket (mixed case)", + "uri": "mongodb://rel%2FMongoDB-27017.sock", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "rel/MongoDB-27017.sock", + "port": null + } + ], + "auth": null, + "options": null + }, { "description": "Unix domain socket (relative path with spaces)", "uri": "mongodb://rel%2F %2Fmongodb-27017.sock", diff --git a/test/connection_string/test/valid-warnings.json b/test/connection_string/test/valid-warnings.json index 1eacbf8fcb..f0e8288bc7 100644 --- a/test/connection_string/test/valid-warnings.json +++ b/test/connection_string/test/valid-warnings.json @@ -93,6 +93,21 @@ ], "auth": null, "options": null + }, + { + "description": "Comma in a key value pair causes a warning", + "uri": "mongodb://localhost?authMechanism=MONGODB-OIDC&authMechanismProperties=TOKEN_RESOURCE:mongodb://host1%2Chost2", + "valid": true, + "warning": true, + "hosts": [ + { + "type": 
"hostname", + "host": "localhost", + "port": null + } + ], + "auth": null, + "options": null } ] } diff --git a/test/csot/retryability-timeoutMS.json b/test/csot/retryability-timeoutMS.json index 642eca0ee9..9daad260ef 100644 --- a/test/csot/retryability-timeoutMS.json +++ b/test/csot/retryability-timeoutMS.json @@ -11,8 +11,7 @@ { "minServerVersion": "4.2", "topologies": [ - "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -109,6 +108,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -199,6 +203,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -328,6 +337,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -420,6 +434,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -547,6 +566,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -635,6 +659,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -761,6 +790,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - replaceOne on collection", + "runOnRequirements": 
[ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -852,6 +886,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -983,6 +1022,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1076,6 +1120,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1204,6 +1253,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1292,6 +1346,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1418,6 +1477,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1509,6 +1573,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1640,6 +1709,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { 
"name": "failPoint", @@ -1733,6 +1807,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1869,6 +1948,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1965,6 +2049,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2096,6 +2185,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2184,6 +2278,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2304,6 +2403,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2391,6 +2495,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2513,6 +2622,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2601,6 +2715,11 @@ }, { "description": "operation is 
retried multiple times if timeoutMS is zero - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2731,6 +2850,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2826,6 +2950,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2956,6 +3085,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3044,6 +3178,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3167,6 +3306,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3255,6 +3399,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3378,6 +3527,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3466,6 +3620,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on 
database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3589,6 +3748,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3677,6 +3841,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3800,6 +3969,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3888,6 +4062,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4011,6 +4190,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4099,6 +4283,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4219,6 +4408,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4306,6 +4500,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + 
], "operations": [ { "name": "failPoint", @@ -4429,6 +4628,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4518,6 +4722,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4642,6 +4851,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4730,6 +4944,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4853,6 +5072,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4941,6 +5165,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5061,6 +5290,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5148,6 +5382,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5270,6 +5509,11 @@ }, { "description": "operation is retried multiple 
times for non-zero timeoutMS - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5358,6 +5602,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", diff --git a/test/discovery_and_monitoring/unified/interruptInUse-pool-clear.json b/test/discovery_and_monitoring/unified/interruptInUse-pool-clear.json new file mode 100644 index 0000000000..6fdef55b4e --- /dev/null +++ b/test/discovery_and_monitoring/unified/interruptInUse-pool-clear.json @@ -0,0 +1,552 @@ +{ + "description": "interruptInUse", + "schemaVersion": "1.11", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "topologies": [ + "replicaset", + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "interruptInUse", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Connection pool clear uses interruptInUseConnections=true after monitor timeout", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "connectTimeoutMS": 500, + "heartbeatFrequencyMS": 500, + "appname": "interruptInUse", + "retryReads": false, + "minPoolSize": 0 + }, + "observeEvents": [ + "poolClearedEvent", + "connectionClosedEvent", + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": 
"database", + "collectionName": "interruptInUse" + } + }, + { + "thread": { + "id": "thread1" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { "_id" : 1 } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "find", + "object": "collection", + "arguments": { + "filter": { "$where": "sleep(2000) || true" } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", "isMaster" + ], + "blockConnection": true, + "blockTimeMS": 1500, + "appName": "interruptInUse" + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + } + ] + }, + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckedOutEvent": { } + }, + { + "connectionCheckedInEvent": { } + }, + { + "connectionCheckedOutEvent": { } + }, + { + "poolClearedEvent": { + "interruptInUseConnections": true + } + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { } + } + ] + } + ], + "outcome": [{ + "collectionName": "interruptInUse", + "databaseName": "sdam-tests", + "documents": [{ "_id": 1 }] + }] + }, + { + "description": "Error returned from connection pool clear with interruptInUseConnections=true is retryable", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { 
+ "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "connectTimeoutMS": 500, + "heartbeatFrequencyMS": 500, + "appname": "interruptInUseRetryable", + "retryReads": true, + "minPoolSize": 0 + }, + "observeEvents": [ + "poolClearedEvent", + "connectionClosedEvent", + "commandFailedEvent", + "commandStartedEvent", + "commandSucceededEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "interruptInUse" + } + }, + { + "thread": { + "id": "thread1" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { "_id" : 1 } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "find", + "object": "collection", + "arguments": { + "filter": { "$where": "sleep(2000) || true" } + } + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", "isMaster" + ], + "blockConnection": true, + "blockTimeMS": 1500, + "appName": "interruptInUseRetryable" + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + 
"commandName": "find" + } + } + ] + }, + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckedOutEvent": { } + }, + { + "connectionCheckedInEvent": { } + }, + { + "connectionCheckedOutEvent": { } + }, + { + "poolClearedEvent": { + "interruptInUseConnections": true + } + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { } + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ], + "outcome": [{ + "collectionName": "interruptInUse", + "databaseName": "sdam-tests", + "documents": [{ "_id": 1 }] + }] + }, + { + "description": "Error returned from connection pool clear with interruptInUseConnections=true is retryable for write", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "connectTimeoutMS": 500, + "heartbeatFrequencyMS": 500, + "appname": "interruptInUseRetryableWrite", + "retryWrites": true, + "minPoolSize": 0 + }, + "observeEvents": [ + "poolClearedEvent", + "connectionClosedEvent", + "commandFailedEvent", + "commandStartedEvent", + "commandSucceededEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ]} + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "interruptInUse" + } + }, + { + "thread": { + "id": "thread1" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { "_id": 1 } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { "$where": "sleep(2000) || true" }, + "update": [ { "$set": { "a": "bar" } } ] + } + } + } + }, + { + "name": "failPoint", + 
"object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", "isMaster" + ], + "blockConnection": true, + "blockTimeMS": 1500, + "appName": "interruptInUseRetryableWrite" + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandFailedEvent": { + "commandName": "update" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + }, + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckedOutEvent": { } + }, + { + "connectionCheckedInEvent": { } + }, + { + "connectionCheckedOutEvent": { } + }, + { + "poolClearedEvent": { + "interruptInUseConnections": true + } + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { } + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ], + "outcome": [{ + "collectionName": "interruptInUse", + "databaseName": "sdam-tests", + "documents": [{ "_id": 1, "a" : "bar"}] + }] + } + ] +} \ No newline at end of file diff --git a/test/index_management/createSearchIndex.json b/test/index_management/createSearchIndex.json index 04cffbe9c9..f9c4e44d3e 100644 --- a/test/index_management/createSearchIndex.json +++ b/test/index_management/createSearchIndex.json @@ -55,7 +55,7 @@ }, "expectError": { "isError": true, - "errorContains": "Search index commands are only supported with Atlas" + "errorContains": "Atlas" } } ], @@ -102,7 +102,7 @@ }, "expectError": { "isError": 
true, - "errorContains": "Search index commands are only supported with Atlas" + "errorContains": "Atlas" } } ], diff --git a/test/index_management/createSearchIndexes.json b/test/index_management/createSearchIndexes.json index 95dbedde77..3cf56ce12e 100644 --- a/test/index_management/createSearchIndexes.json +++ b/test/index_management/createSearchIndexes.json @@ -49,7 +49,7 @@ }, "expectError": { "isError": true, - "errorContains": "Search index commands are only supported with Atlas" + "errorContains": "Atlas" } } ], @@ -89,7 +89,7 @@ }, "expectError": { "isError": true, - "errorContains": "Search index commands are only supported with Atlas" + "errorContains": "Atlas" } } ], @@ -138,7 +138,7 @@ }, "expectError": { "isError": true, - "errorContains": "Search index commands are only supported with Atlas" + "errorContains": "Atlas" } } ], diff --git a/test/index_management/dropSearchIndex.json b/test/index_management/dropSearchIndex.json index 0f21a5b68d..d8957a2227 100644 --- a/test/index_management/dropSearchIndex.json +++ b/test/index_management/dropSearchIndex.json @@ -49,7 +49,7 @@ }, "expectError": { "isError": true, - "errorContains": "Search index commands are only supported with Atlas" + "errorContains": "Atlas" } } ], diff --git a/test/index_management/listSearchIndexes.json b/test/index_management/listSearchIndexes.json index 24c51ad88c..a8cef42f7a 100644 --- a/test/index_management/listSearchIndexes.json +++ b/test/index_management/listSearchIndexes.json @@ -46,7 +46,7 @@ "object": "collection0", "expectError": { "isError": true, - "errorContains": "Search index commands are only supported with Atlas" + "errorContains": "Atlas" } } ], @@ -81,7 +81,7 @@ }, "expectError": { "isError": true, - "errorContains": "Search index commands are only supported with Atlas" + "errorContains": "Atlas" } } ], @@ -122,7 +122,7 @@ }, "expectError": { "isError": true, - "errorContains": "Search index commands are only supported with Atlas" + "errorContains": "Atlas" } } 
], diff --git a/test/index_management/searchIndexIgnoresReadWriteConcern.json b/test/index_management/searchIndexIgnoresReadWriteConcern.json new file mode 100644 index 0000000000..edf71b7b7e --- /dev/null +++ b/test/index_management/searchIndexIgnoresReadWriteConcern.json @@ -0,0 +1,252 @@ +{ + "description": "search index operations ignore read and write concern", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "tests": [ + { + "description": "createSearchIndex ignores read and write concern", + "operations": [ + { + "name": "createSearchIndex", + "object": "collection0", + "arguments": { + "model": { + "definition": { + "mappings": { + "dynamic": true + } + } + } + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createSearchIndexes": "collection0", + "indexes": [ + { + "definition": { + "mappings": { + "dynamic": true + } + } + } + ], + "$db": "database0", + "writeConcern": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "createSearchIndexes ignores read and write concern", + "operations": [ + { + "name": "createSearchIndexes", + "object": "collection0", + "arguments": { + "models": [] + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": 
[ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createSearchIndexes": "collection0", + "indexes": [], + "$db": "database0", + "writeConcern": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "dropSearchIndex ignores read and write concern", + "operations": [ + { + "name": "dropSearchIndex", + "object": "collection0", + "arguments": { + "name": "test index" + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "dropSearchIndex": "collection0", + "name": "test index", + "$db": "database0", + "writeConcern": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "listSearchIndexes ignores read and write concern", + "operations": [ + { + "name": "listSearchIndexes", + "object": "collection0", + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$listSearchIndexes": {} + } + ], + "writeConcern": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "updateSearchIndex ignores the read and write concern", + "operations": [ + { + "name": "updateSearchIndex", + "object": "collection0", + "arguments": { + "name": "test index", + "definition": {} + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "updateSearchIndex": "collection0", + "name": "test index", + "definition": {}, + "$db": "database0", + "writeConcern": { + "$$exists": false + }, + "readConcern": { + "$$exists": false 
+ } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/index_management/updateSearchIndex.json b/test/index_management/updateSearchIndex.json index 88a46a3069..76a5962146 100644 --- a/test/index_management/updateSearchIndex.json +++ b/test/index_management/updateSearchIndex.json @@ -50,7 +50,7 @@ }, "expectError": { "isError": true, - "errorContains": "Search index commands are only supported with Atlas" + "errorContains": "Atlas" } } ], diff --git a/test/lambda/mongodb/app.py b/test/lambda/mongodb/app.py index 65e6dc88ff..5840347d9a 100644 --- a/test/lambda/mongodb/app.py +++ b/test/lambda/mongodb/app.py @@ -130,7 +130,12 @@ def create_response(): # Reset the numbers. def reset(): - global open_connections, heartbeat_count, total_heartbeat_duration, total_commands, total_command_duration + global \ + open_connections, \ + heartbeat_count, \ + total_heartbeat_duration, \ + total_commands, \ + total_command_duration open_connections = 0 heartbeat_count = 0 total_heartbeat_duration = 0 diff --git a/test/mockupdb/test_cursor.py b/test/mockupdb/test_cursor.py index 1cf3a05ed5..96a7e17053 100644 --- a/test/mockupdb/test_cursor.py +++ b/test/mockupdb/test_cursor.py @@ -16,11 +16,13 @@ from __future__ import annotations import unittest +from test import PyMongoTestCase from mockupdb import MockupDB, OpMsg, going from bson.objectid import ObjectId from pymongo import MongoClient +from pymongo.errors import OperationFailure class TestCursor(unittest.TestCase): @@ -57,5 +59,31 @@ def test_getmore_load_balanced(self): request.replies({"cursor": {"id": cursor_id, "nextBatch": [{}]}}) +class TestRetryableErrorCodeCatch(PyMongoTestCase): + def _test_fail_on_operation_failure_with_code(self, code): + """Test reads on error codes that should not be retried""" + server = MockupDB() + server.run() + self.addCleanup(server.stop) + server.autoresponds("ismaster", maxWireVersion=6) + + client = MongoClient(server.uri) + + with going(lambda: server.receives(OpMsg({"find": 
"collection"})).command_err(code=code)): + cursor = client.db.collection.find() + with self.assertRaises(OperationFailure) as ctx: + cursor.next() + self.assertEqual(ctx.exception.code, code) + + def test_fail_on_operation_failure_none(self): + self._test_fail_on_operation_failure_with_code(None) + + def test_fail_on_operation_failure_zero(self): + self._test_fail_on_operation_failure_with_code(0) + + def test_fail_on_operation_failure_one(self): + self._test_fail_on_operation_failure_with_code(1) + + if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_mongos_command_read_mode.py b/test/mockupdb/test_mongos_command_read_mode.py index dff1288e67..8ee33431a8 100644 --- a/test/mockupdb/test_mongos_command_read_mode.py +++ b/test/mockupdb/test_mongos_command_read_mode.py @@ -47,7 +47,6 @@ def test_aggregate(self): secondary_collection = collection.with_options(read_preference=ReadPreference.SECONDARY) with going(secondary_collection.aggregate, []): - command = server.receives( OpMsg( { diff --git a/test/mockupdb/test_reset_and_request_check.py b/test/mockupdb/test_reset_and_request_check.py index 12c0bec9ac..19dfb9e395 100644 --- a/test/mockupdb/test_reset_and_request_check.py +++ b/test/mockupdb/test_reset_and_request_check.py @@ -22,6 +22,7 @@ from pymongo import MongoClient from pymongo.errors import ConnectionFailure +from pymongo.operations import _Op from pymongo.server_type import SERVER_TYPE @@ -68,7 +69,7 @@ def _test_disconnect(self, operation): # Server is Unknown. topology = self.client._topology with self.assertRaises(ConnectionFailure): - topology.select_server_by_address(self.server.address, 0) + topology.select_server_by_address(self.server.address, _Op.TEST, 0) time.sleep(0.5) after = time.time() @@ -95,7 +96,7 @@ def _test_timeout(self, operation): # Server is *not* Unknown. 
topology = self.client._topology - server = topology.select_server_by_address(self.server.address, 0) + server = topology.select_server_by_address(self.server.address, _Op.TEST, 0) assert server is not None self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) @@ -117,7 +118,7 @@ def _test_not_master(self, operation): # Server is rediscovered. topology = self.client._topology - server = topology.select_server_by_address(self.server.address, 0) + server = topology.select_server_by_address(self.server.address, _Op.TEST, 0) assert server is not None self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) diff --git a/test/mockupdb/test_standalone_shard.py b/test/mockupdb/test_standalone_shard.py new file mode 100644 index 0000000000..8d388cf74f --- /dev/null +++ b/test/mockupdb/test_standalone_shard.py @@ -0,0 +1,57 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test errors that come from a standalone shard.""" +from __future__ import annotations + +import unittest + +from mockupdb import MockupDB, going + +from pymongo import MongoClient +from pymongo.errors import OperationFailure + + +class TestStandaloneShard(unittest.TestCase): + # See PYTHON-2048 and SERVER-44591. 
+ def test_bulk_txn_error_message(self): + server = MockupDB(auto_ismaster={"maxWireVersion": 8}) + server.run() + self.addCleanup(server.stop) + client = MongoClient(server.uri) + self.addCleanup(client.close) + + with self.assertRaisesRegex( + OperationFailure, "This MongoDB deployment does not support retryable writes" + ): + with going(client.db.collection.insert_many, [{}, {}]): + request = server.receives() + request.reply( + { + "n": 0, + "ok": 1.0, + "writeErrors": [ + { + "code": 20, + "codeName": "IllegalOperation", + "errmsg": "Transaction numbers are only allowed on a replica set member or mongos", + "index": 0, + } + ], + } + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mod_wsgi_test/test_client.py b/test/mod_wsgi_test/test_client.py index 63ae883473..88eeb7a57e 100644 --- a/test/mod_wsgi_test/test_client.py +++ b/test/mod_wsgi_test/test_client.py @@ -131,8 +131,7 @@ def main(options, mode, urls): if options.verbose: print( - "Getting {} {} times total in {} threads, " - "{} times per thread".format( + "Getting {} {} times total in {} threads, " "{} times per thread".format( urls, nrequests_per_thread * options.nthreads, options.nthreads, diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py index 2ad4edaf8f..43b4da786a 100644 --- a/test/performance/perf_test.py +++ b/test/performance/perf_test.py @@ -1,4 +1,4 @@ -# Copyright 2015 MongoDB, Inc. +# Copyright 2015-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,16 +12,41 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tests for the MongoDB Driver Performance Benchmarking Spec.""" +"""Tests for the MongoDB Driver Performance Benchmarking Spec. 
+ +See https://github.com/mongodb/specifications/blob/master/source/benchmarking/benchmarking.md + + +To set up the benchmarks locally:: + + python -m pip install simplejson + git clone --depth 1 https://github.com/mongodb/specifications.git + pushd specifications/source/benchmarking/data + tar xf extended_bson.tgz + tar xf parallel.tgz + tar xf single_and_multi_document.tgz + popd + export TEST_PATH="specifications/source/benchmarking/data" + export OUTPUT_FILE="results.json" + +Then to run all benchmarks quickly:: + + FASTBENCH=1 python test/performance/perf_test.py -v + +To run individual benchmarks quickly:: + + FASTBENCH=1 python test/performance/perf_test.py -v TestRunCommand TestFindManyAndEmptyCursor +""" from __future__ import annotations import multiprocessing as mp import os import sys import tempfile +import threading import time import warnings -from typing import Any, List +from typing import Any, List, Optional try: import simplejson as json @@ -30,16 +55,24 @@ sys.path[0:0] = [""] -from test import client_context, host, port, unittest +from test import client_context, unittest -from bson import decode, encode -from bson.json_util import loads +from bson import decode, encode, json_util from gridfs import GridFSBucket from pymongo import MongoClient +# Spec says to use at least 1 minute cumulative execution time and up to 100 iterations or 5 minutes but that +# makes the benchmarks too slow. Instead, we use at least 30 seconds and at most 60 seconds. NUM_ITERATIONS = 100 -MAX_ITERATION_TIME = 300 +MIN_ITERATION_TIME = 30 +MAX_ITERATION_TIME = 60 NUM_DOCS = 10000 +# When debugging or prototyping it's often useful to run the benchmarks locally, set FASTBENCH=1 to run quickly. 
+if bool(os.getenv("FASTBENCH")): + NUM_ITERATIONS = 2 + MIN_ITERATION_TIME = 0.1 + MAX_ITERATION_TIME = 0.5 + NUM_DOCS = 1000 TEST_PATH = os.environ.get( "TEST_PATH", os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.join("data")) @@ -69,34 +102,47 @@ def __exit__(self, *args): self.interval = self.end - self.start +def threaded(n_threads, func): + threads = [threading.Thread(target=func) for _ in range(n_threads)] + for t in threads: + t.start() + for t in threads: + t.join() + + class PerformanceTest: - dataset: Any - data_size: Any - do_task: Any + dataset: str + data_size: int fail: Any + n_threads: int = 1 @classmethod def setUpClass(cls): client_context.init() def setUp(self): - pass + self.setup_time = time.monotonic() def tearDown(self): - name = self.__class__.__name__ + duration = time.monotonic() - self.setup_time + # Remove "Test" so that TestFlatEncoding is reported as "FlatEncoding". + name = self.__class__.__name__[4:] median = self.percentile(50) - bytes_per_sec = self.data_size / median - print(f"Running {self.__class__.__name__}. 
MEDIAN={self.percentile(50)}") + megabytes_per_sec = (self.data_size * self.n_threads) / median / 1000000 + print( + f"Completed {self.__class__.__name__} {megabytes_per_sec:.3f} MB/s, MEDIAN={self.percentile(50):.3f}s, " + f"total time={duration:.3f}s, iterations={len(self.results)}" + ) result_data.append( { "info": { "test_name": name, "args": { - "threads": 1, + "threads": self.n_threads, }, }, "metrics": [ - {"name": "bytes_per_sec", "value": bytes_per_sec}, + {"name": "megabytes_per_sec", "type": "MEDIAN", "value": megabytes_per_sec}, ], } ) @@ -104,6 +150,9 @@ def tearDown(self): def before(self): pass + def do_task(self): + raise NotImplementedError + def after(self): pass @@ -119,37 +168,64 @@ def percentile(self, percentile): def runTest(self): results = [] start = time.monotonic() - self.max_iterations = NUM_ITERATIONS - for i in range(NUM_ITERATIONS): - if time.monotonic() - start > MAX_ITERATION_TIME: - warnings.warn("Test timed out, completed %s iterations." % i) - break + i = 0 + while True: + i += 1 self.before() with Timer() as timer: - self.do_task() + if self.n_threads == 1: + self.do_task() + else: + threaded(self.n_threads, self.do_task) self.after() results.append(timer.interval) + duration = time.monotonic() - start + if duration > MIN_ITERATION_TIME and i >= NUM_ITERATIONS: + break + if duration > MAX_ITERATION_TIME: + with warnings.catch_warnings(): + warnings.simplefilter("default") + warnings.warn( + f"{self.__class__.__name__} timed out after {MAX_ITERATION_TIME}s, completed {i}/{NUM_ITERATIONS} iterations." + ) + + break self.results = results + def mp_map(self, map_func, files): + with mp.Pool(initializer=proc_init, initargs=(client_context.client_options,)) as pool: + pool.map(map_func, files) + # BSON MICRO-BENCHMARKS -class BsonEncodingTest(PerformanceTest): + + +class MicroTest(PerformanceTest): def setUp(self): + super().setUp() # Location of test data. 
with open(os.path.join(TEST_PATH, os.path.join("extended_bson", self.dataset))) as data: - self.document = loads(data.read()) + self.file_data = data.read() + + +class BsonEncodingTest(MicroTest): + def setUp(self): + super().setUp() + # Location of test data. + self.document = json_util.loads(self.file_data) + self.data_size = len(encode(self.document)) * NUM_DOCS def do_task(self): for _ in range(NUM_DOCS): encode(self.document) -class BsonDecodingTest(PerformanceTest): +class BsonDecodingTest(MicroTest): def setUp(self): - # Location of test data. - with open(os.path.join(TEST_PATH, os.path.join("extended_bson", self.dataset))) as data: - self.document = encode(json.loads(data.read())) + super().setUp() + self.document = encode(json_util.loads(self.file_data)) + self.data_size = len(self.document) * NUM_DOCS def do_task(self): for _ in range(NUM_DOCS): @@ -158,50 +234,100 @@ def do_task(self): class TestFlatEncoding(BsonEncodingTest, unittest.TestCase): dataset = "flat_bson.json" - data_size = 75310000 class TestFlatDecoding(BsonDecodingTest, unittest.TestCase): dataset = "flat_bson.json" - data_size = 75310000 class TestDeepEncoding(BsonEncodingTest, unittest.TestCase): dataset = "deep_bson.json" - data_size = 19640000 class TestDeepDecoding(BsonDecodingTest, unittest.TestCase): dataset = "deep_bson.json" - data_size = 19640000 class TestFullEncoding(BsonEncodingTest, unittest.TestCase): dataset = "full_bson.json" - data_size = 57340000 class TestFullDecoding(BsonDecodingTest, unittest.TestCase): dataset = "full_bson.json" - data_size = 57340000 + + +# JSON MICRO-BENCHMARKS +class JsonEncodingTest(MicroTest): + def setUp(self): + super().setUp() + # Location of test data. + self.document = json_util.loads(self.file_data) + # Note: use the BSON size as the data size so we can compare BSON vs JSON performance. 
+ self.data_size = len(encode(self.document)) * NUM_DOCS + + def do_task(self): + for _ in range(NUM_DOCS): + json_util.dumps(self.document) + + +class JsonDecodingTest(MicroTest): + def setUp(self): + super().setUp() + self.document = self.file_data + # Note: use the BSON size as the data size so we can compare BSON vs JSON performance. + self.data_size = len(encode(json_util.loads(self.file_data))) * NUM_DOCS + + def do_task(self): + for _ in range(NUM_DOCS): + json_util.loads(self.document) + + +class TestJsonFlatEncoding(JsonEncodingTest, unittest.TestCase): + dataset = "flat_bson.json" + + +class TestJsonFlatDecoding(JsonDecodingTest, unittest.TestCase): + dataset = "flat_bson.json" + + +class TestJsonDeepEncoding(JsonEncodingTest, unittest.TestCase): + dataset = "deep_bson.json" + + +class TestJsonDeepDecoding(JsonDecodingTest, unittest.TestCase): + dataset = "deep_bson.json" + + +class TestJsonFullEncoding(JsonEncodingTest, unittest.TestCase): + dataset = "full_bson.json" + + +class TestJsonFullDecoding(JsonDecodingTest, unittest.TestCase): + dataset = "full_bson.json" # SINGLE-DOC BENCHMARKS class TestRunCommand(PerformanceTest, unittest.TestCase): - data_size = 160000 + data_size = len(encode({"hello": True})) * NUM_DOCS def setUp(self): + super().setUp() self.client = client_context.client self.client.drop_database("perftest") def do_task(self): command = self.client.perftest.command for _ in range(NUM_DOCS): - command("ping") + command("hello", True) + + +class TestRunCommand8Threads(TestRunCommand): + n_threads = 8 class TestDocument(PerformanceTest): def setUp(self): + super().setUp() # Location of test data. 
with open( os.path.join(TEST_PATH, os.path.join("single_and_multi_document", self.dataset)) @@ -222,54 +348,62 @@ def after(self): self.client.perftest.drop_collection("corpus") -class TestFindOneByID(TestDocument, unittest.TestCase): - data_size = 16220000 +class FindTest(TestDocument): + dataset = "tweet.json" def setUp(self): - self.dataset = "tweet.json" super().setUp() - + self.data_size = len(encode(self.document)) * NUM_DOCS documents = [self.document.copy() for _ in range(NUM_DOCS)] self.corpus = self.client.perftest.corpus result = self.corpus.insert_many(documents) self.inserted_ids = result.inserted_ids + def before(self): + pass + + def after(self): + pass + + +class TestFindOneByID(FindTest, unittest.TestCase): def do_task(self): find_one = self.corpus.find_one for _id in self.inserted_ids: find_one({"_id": _id}) - def before(self): - pass - def after(self): - pass +class TestFindOneByID8Threads(TestFindOneByID): + n_threads = 8 -class TestSmallDocInsertOne(TestDocument, unittest.TestCase): - data_size = 2750000 +class SmallDocInsertTest(TestDocument): + dataset = "small_doc.json" def setUp(self): - self.dataset = "small_doc.json" super().setUp() - + self.data_size = len(encode(self.document)) * NUM_DOCS self.documents = [self.document.copy() for _ in range(NUM_DOCS)] + +class TestSmallDocInsertOne(SmallDocInsertTest, unittest.TestCase): def do_task(self): insert_one = self.corpus.insert_one for doc in self.documents: insert_one(doc) -class TestLargeDocInsertOne(TestDocument, unittest.TestCase): - data_size = 27310890 +class LargeDocInsertTest(TestDocument): + dataset = "large_doc.json" def setUp(self): - self.dataset = "large_doc.json" super().setUp() + n_docs = 10 + self.data_size = len(encode(self.document)) * n_docs + self.documents = [self.document.copy() for _ in range(n_docs)] - self.documents = [self.document.copy() for _ in range(10)] +class TestLargeDocInsertOne(LargeDocInsertTest, unittest.TestCase): def do_task(self): insert_one = 
self.corpus.insert_one for doc in self.documents: @@ -277,61 +411,28 @@ def do_task(self): # MULTI-DOC BENCHMARKS -class TestFindManyAndEmptyCursor(TestDocument, unittest.TestCase): - data_size = 16220000 - - def setUp(self): - self.dataset = "tweet.json" - super().setUp() - - for _ in range(10): - self.client.perftest.command("insert", "corpus", documents=[self.document] * 1000) - self.corpus = self.client.perftest.corpus - +class TestFindManyAndEmptyCursor(FindTest, unittest.TestCase): def do_task(self): list(self.corpus.find()) - def before(self): - pass - def after(self): - pass +class TestFindManyAndEmptyCursor8Threads(TestFindManyAndEmptyCursor): + n_threads = 8 -class TestSmallDocBulkInsert(TestDocument, unittest.TestCase): - data_size = 2750000 - - def setUp(self): - self.dataset = "small_doc.json" - super().setUp() - self.documents = [self.document.copy() for _ in range(NUM_DOCS)] - - def before(self): - self.corpus = self.client.perftest.create_collection("corpus") - +class TestSmallDocBulkInsert(SmallDocInsertTest, unittest.TestCase): def do_task(self): self.corpus.insert_many(self.documents, ordered=True) -class TestLargeDocBulkInsert(TestDocument, unittest.TestCase): - data_size = 27310890 - - def setUp(self): - self.dataset = "large_doc.json" - super().setUp() - self.documents = [self.document.copy() for _ in range(10)] - - def before(self): - self.corpus = self.client.perftest.create_collection("corpus") - +class TestLargeDocBulkInsert(LargeDocInsertTest, unittest.TestCase): def do_task(self): self.corpus.insert_many(self.documents, ordered=True) -class TestGridFsUpload(PerformanceTest, unittest.TestCase): - data_size = 52428800 - +class GridFsTest(PerformanceTest): def setUp(self): + super().setUp() self.client = client_context.client self.client.drop_database("perftest") @@ -340,56 +441,41 @@ def setUp(self): ) with open(gridfs_path, "rb") as data: self.document = data.read() - + self.data_size = len(self.document) self.bucket = 
GridFSBucket(self.client.perftest) def tearDown(self): super().tearDown() self.client.drop_database("perftest") + +class TestGridFsUpload(GridFsTest, unittest.TestCase): def before(self): + # Create the bucket. self.bucket.upload_from_stream("init", b"x") def do_task(self): self.bucket.upload_from_stream("gridfstest", self.document) -class TestGridFsDownload(PerformanceTest, unittest.TestCase): - data_size = 52428800 - +class TestGridFsDownload(GridFsTest, unittest.TestCase): def setUp(self): - self.client = client_context.client - self.client.drop_database("perftest") - - gridfs_path = os.path.join( - TEST_PATH, os.path.join("single_and_multi_document", "gridfs_large.bin") - ) - - self.bucket = GridFSBucket(self.client.perftest) - with open(gridfs_path, "rb") as gfile: - self.uploaded_id = self.bucket.upload_from_stream("gridfstest", gfile) - - def tearDown(self): - super().tearDown() - self.client.drop_database("perftest") + super().setUp() + self.uploaded_id = self.bucket.upload_from_stream("gridfstest", self.document) def do_task(self): self.bucket.open_download_stream(self.uploaded_id).read() -proc_client = None +proc_client: Optional[MongoClient] = None -def proc_init(*dummy): +def proc_init(client_kwargs): global proc_client - proc_client = MongoClient(host, port) + proc_client = MongoClient(**client_kwargs) # PARALLEL BENCHMARKS -def mp_map(map_func, files): - pool = mp.Pool(initializer=proc_init) - pool.map(map_func, files) - pool.close() def insert_json_file(filename): @@ -414,13 +500,10 @@ def insert_json_file_with_file_id(filename): def read_json_file(filename): assert proc_client is not None coll = proc_client.perftest.corpus - temp = tempfile.TemporaryFile(mode="w") - try: - temp.writelines( - [json.dumps(doc) + "\n" for doc in coll.find({"file": filename}, {"_id": False})] - ) - finally: - temp.close() + with tempfile.TemporaryFile(mode="w") as temp: + for doc in coll.find({"file": filename}, {"_id": False}): + temp.write(json.dumps(doc)) + 
temp.write("\n") def insert_gridfs_file(filename): @@ -443,24 +526,23 @@ def read_gridfs_file(filename): class TestJsonMultiImport(PerformanceTest, unittest.TestCase): - data_size = 565000000 - def setUp(self): + super().setUp() self.client = client_context.client self.client.drop_database("perftest") + ldjson_path = os.path.join(TEST_PATH, os.path.join("parallel", "ldjson_multi")) + self.files = [os.path.join(ldjson_path, s) for s in os.listdir(ldjson_path)] + self.data_size = sum(os.path.getsize(fname) for fname in self.files) + self.corpus = self.client.perftest.corpus def before(self): self.client.perftest.command({"create": "corpus"}) - self.corpus = self.client.perftest.corpus - - ldjson_path = os.path.join(TEST_PATH, os.path.join("parallel", "ldjson_multi")) - self.files = [os.path.join(ldjson_path, s) for s in os.listdir(ldjson_path)] def do_task(self): - mp_map(insert_json_file, self.files) + self.mp_map(insert_json_file, self.files) def after(self): - self.client.perftest.drop_collection("corpus") + self.corpus.drop() def tearDown(self): super().tearDown() @@ -468,20 +550,20 @@ def tearDown(self): class TestJsonMultiExport(PerformanceTest, unittest.TestCase): - data_size = 565000000 - def setUp(self): + super().setUp() self.client = client_context.client self.client.drop_database("perftest") self.client.perfest.corpus.create_index("file") ldjson_path = os.path.join(TEST_PATH, os.path.join("parallel", "ldjson_multi")) self.files = [os.path.join(ldjson_path, s) for s in os.listdir(ldjson_path)] + self.data_size = sum(os.path.getsize(fname) for fname in self.files) - mp_map(insert_json_file_with_file_id, self.files) + self.mp_map(insert_json_file_with_file_id, self.files) def do_task(self): - mp_map(read_json_file, self.files) + self.mp_map(read_json_file, self.files) def tearDown(self): super().tearDown() @@ -489,11 +571,13 @@ def tearDown(self): class TestGridFsMultiFileUpload(PerformanceTest, unittest.TestCase): - data_size = 262144000 - def setUp(self): 
+ super().setUp() self.client = client_context.client self.client.drop_database("perftest") + gridfs_path = os.path.join(TEST_PATH, os.path.join("parallel", "gridfs_multi")) + self.files = [os.path.join(gridfs_path, s) for s in os.listdir(gridfs_path)] + self.data_size = sum(os.path.getsize(fname) for fname in self.files) def before(self): self.client.perftest.drop_collection("fs.files") @@ -504,7 +588,7 @@ def before(self): self.files = [os.path.join(gridfs_path, s) for s in os.listdir(gridfs_path)] def do_task(self): - mp_map(insert_gridfs_file, self.files) + self.mp_map(insert_gridfs_file, self.files) def tearDown(self): super().tearDown() @@ -512,9 +596,8 @@ def tearDown(self): class TestGridFsMultiFileDownload(PerformanceTest, unittest.TestCase): - data_size = 262144000 - def setUp(self): + super().setUp() self.client = client_context.client self.client.drop_database("perftest") @@ -522,13 +605,13 @@ def setUp(self): gridfs_path = os.path.join(TEST_PATH, os.path.join("parallel", "gridfs_multi")) self.files = [os.path.join(gridfs_path, s) for s in os.listdir(gridfs_path)] - + self.data_size = sum(os.path.getsize(fname) for fname in self.files) for fname in self.files: with open(fname, "rb") as gfile: bucket.upload_from_stream(fname, gfile) def do_task(self): - mp_map(read_gridfs_file, self.files) + self.mp_map(read_gridfs_file, self.files) def tearDown(self): super().tearDown() diff --git a/test/pymongo_mocks.py b/test/pymongo_mocks.py index 2b291c7bd3..c750d0cf71 100644 --- a/test/pymongo_mocks.py +++ b/test/pymongo_mocks.py @@ -47,7 +47,7 @@ def checkout(self, handler=None): assert host_and_port in ( client.mock_standalones + client.mock_members + client.mock_mongoses - ), ("bad host: %s" % host_and_port) + ), "bad host: %s" % host_and_port with Pool.checkout(self, handler) as conn: conn.mock_host = self.mock_host diff --git a/test/qcheck.py b/test/qcheck.py index 739d4948ec..8339bc3763 100644 --- a/test/qcheck.py +++ b/test/qcheck.py @@ -144,7 +144,6 @@ def 
gen_dbref(): def gen_mongo_value(depth, ref): - choices = [ gen_unicode(gen_range(0, 50)), gen_printable_string(gen_range(0, 50)), diff --git a/test/retryable_reads/unified/exceededTimeLimit.json b/test/retryable_reads/unified/exceededTimeLimit.json new file mode 100644 index 0000000000..8d090bbe3f --- /dev/null +++ b/test/retryable_reads/unified/exceededTimeLimit.json @@ -0,0 +1,147 @@ +{ + "description": "ExceededTimeLimit is a retryable read", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "exceededtimelimit-test" + } + } + ], + "initialData": [ + { + "collectionName": "exceededtimelimit-test", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Find succeeds on second attempt after ExceededTimeLimit", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 262 + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": 
{ + "find": "exceededtimelimit-test", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "commandName": "find", + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "exceededtimelimit-test", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "commandName": "find", + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/readConcernMajorityNotAvailableYet.json b/test/retryable_reads/unified/readConcernMajorityNotAvailableYet.json new file mode 100644 index 0000000000..8aa6a6b5e5 --- /dev/null +++ b/test/retryable_reads/unified/readConcernMajorityNotAvailableYet.json @@ -0,0 +1,147 @@ +{ + "description": "ReadConcernMajorityNotAvailableYet is a retryable read", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "readconcernmajoritynotavailableyet_test" + } + } + ], + "initialData": [ + { + "collectionName": "readconcernmajoritynotavailableyet_test", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Find succeeds on second attempt after ReadConcernMajorityNotAvailableYet", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + 
"failCommands": [ + "find" + ], + "errorCode": 134 + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "readconcernmajoritynotavailableyet_test", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "commandName": "find", + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "readconcernmajoritynotavailableyet_test", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "commandName": "find", + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/legacy/bulkWrite-errorLabels.json b/test/retryable_writes/legacy/bulkWrite-errorLabels.json index 66c3ecb336..773c453bc7 100644 --- a/test/retryable_writes/legacy/bulkWrite-errorLabels.json +++ b/test/retryable_writes/legacy/bulkWrite-errorLabels.json @@ -178,6 +178,175 @@ ] } } + }, + { + "description": "BulkWrite succeeds after PrimarySteppedDown", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "options": { + "ordered": true + } + } + }, + "outcome": { + "result": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "1": 3 + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 
0, + "upsertedIds": {} + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + }, + { + "description": "BulkWrite succeeds after WriteConcernError ShutdownInProgress", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + }, + "operation": { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "options": { + "ordered": true + } + } + }, + "outcome": { + "result": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "1": 3 + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } } ] } diff --git a/test/retryable_writes/legacy/bulkWrite-serverErrors.json b/test/retryable_writes/legacy/bulkWrite-serverErrors.json index 1e6cc74c05..9ef89a9aa5 100644 --- a/test/retryable_writes/legacy/bulkWrite-serverErrors.json +++ b/test/retryable_writes/legacy/bulkWrite-serverErrors.json @@ -25,175 +25,6 @@ } ], "tests": [ - { - "description": "BulkWrite succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorCode": 189, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - { - 
"name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 2 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "deletedCount": 1, - "insertedCount": 1, - "insertedIds": { - "1": 3 - }, - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 23 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "BulkWrite succeeds after WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 2 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "deletedCount": 1, - "insertedCount": 1, - "insertedIds": { - "1": 3 - }, - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 23 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, { "description": "BulkWrite fails with a RetryableWriteError label after two connection failures", "failPoint": { diff --git a/test/retryable_writes/legacy/deleteOne-errorLabels.json b/test/retryable_writes/legacy/deleteOne-errorLabels.json index c14692fd1a..f19b14954b 100644 --- 
a/test/retryable_writes/legacy/deleteOne-errorLabels.json +++ b/test/retryable_writes/legacy/deleteOne-errorLabels.json @@ -102,6 +102,87 @@ ] } } + }, + { + "description": "DeleteOne succeeds after PrimarySteppedDown", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + "outcome": { + "result": { + "deletedCount": 1 + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "DeleteOne succeeds after WriteConcernError ShutdownInProgress", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + }, + "operation": { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + "outcome": { + "result": { + "deletedCount": 1 + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + } } ] } diff --git a/test/retryable_writes/legacy/deleteOne-serverErrors.json b/test/retryable_writes/legacy/deleteOne-serverErrors.json index a1a27838de..5b6f4142f7 100644 --- a/test/retryable_writes/legacy/deleteOne-serverErrors.json +++ b/test/retryable_writes/legacy/deleteOne-serverErrors.json @@ -25,87 +25,6 @@ } ], "tests": [ - { - "description": "DeleteOne succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "errorCode": 189, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - "outcome": { - "result": { - "deletedCount": 1 - 
}, - "collection": { - "data": [ - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "DeleteOne succeeds after WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - "outcome": { - "result": { - "deletedCount": 1 - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 22 - } - ] - } - } - }, { "description": "DeleteOne fails with RetryableWriteError label after two connection failures", "failPoint": { diff --git a/test/retryable_writes/legacy/findOneAndDelete-errorLabels.json b/test/retryable_writes/legacy/findOneAndDelete-errorLabels.json index 60e6e0a7bc..bdaa0ed8c9 100644 --- a/test/retryable_writes/legacy/findOneAndDelete-errorLabels.json +++ b/test/retryable_writes/legacy/findOneAndDelete-errorLabels.json @@ -113,6 +113,99 @@ ] } } + }, + { + "description": "FindOneAndDelete succeeds after PrimarySteppedDown", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + } + }, + "outcome": { + "result": { + "_id": 1, + "x": 11 + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndDelete succeeds after WriteConcernError ShutdownInProgress", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorLabels": [ + "RetryableWriteError" + ], + 
"writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + }, + "operation": { + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + } + }, + "outcome": { + "result": { + "_id": 1, + "x": 11 + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + } } ] } diff --git a/test/retryable_writes/legacy/findOneAndDelete-serverErrors.json b/test/retryable_writes/legacy/findOneAndDelete-serverErrors.json index c18b63f456..d4f6cb3d18 100644 --- a/test/retryable_writes/legacy/findOneAndDelete-serverErrors.json +++ b/test/retryable_writes/legacy/findOneAndDelete-serverErrors.json @@ -25,99 +25,6 @@ } ], "tests": [ - { - "description": "FindOneAndDelete succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorCode": 189, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "findOneAndDelete", - "arguments": { - "filter": { - "x": { - "$gte": 11 - } - }, - "sort": { - "x": 1 - } - } - }, - "outcome": { - "result": { - "_id": 1, - "x": 11 - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndDelete succeeds after WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "findOneAndDelete", - "arguments": { - "filter": { - "x": { - "$gte": 11 - } - }, - "sort": { - "x": 1 - } - } - }, - "outcome": { - "result": { - "_id": 1, - "x": 11 - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 22 - } - ] - } - } - }, { "description": "FindOneAndDelete fails with a 
RetryableWriteError label after two connection failures", "failPoint": { diff --git a/test/retryable_writes/legacy/findOneAndReplace-errorLabels.json b/test/retryable_writes/legacy/findOneAndReplace-errorLabels.json index afa2f47af4..7517a43f9a 100644 --- a/test/retryable_writes/legacy/findOneAndReplace-errorLabels.json +++ b/test/retryable_writes/legacy/findOneAndReplace-errorLabels.json @@ -117,6 +117,107 @@ ] } } + }, + { + "description": "FindOneAndReplace succeeds after PrimarySteppedDown", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + } + }, + "outcome": { + "result": { + "_id": 1, + "x": 11 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndReplace succeeds after WriteConcernError ShutdownInProgress", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + }, + "operation": { + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + } + }, + "outcome": { + "result": { + "_id": 1, + "x": 11 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } } ] } diff --git a/test/retryable_writes/legacy/findOneAndReplace-serverErrors.json b/test/retryable_writes/legacy/findOneAndReplace-serverErrors.json index 944a3af848..f2c642087b 100644 --- 
a/test/retryable_writes/legacy/findOneAndReplace-serverErrors.json +++ b/test/retryable_writes/legacy/findOneAndReplace-serverErrors.json @@ -25,107 +25,6 @@ } ], "tests": [ - { - "description": "FindOneAndReplace succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorCode": 189, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "_id": 1, - "x": 111 - }, - "returnDocument": "Before" - } - }, - "outcome": { - "result": { - "_id": 1, - "x": 11 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 111 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndReplace succeeds after WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "_id": 1, - "x": 111 - }, - "returnDocument": "Before" - } - }, - "outcome": { - "result": { - "_id": 1, - "x": 11 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 111 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, { "description": "FindOneAndReplace fails with a RetryableWriteError label after two connection failures", "failPoint": { diff --git a/test/retryable_writes/legacy/findOneAndUpdate-errorLabels.json b/test/retryable_writes/legacy/findOneAndUpdate-errorLabels.json index 19b3a9e771..135b7a5c85 100644 --- a/test/retryable_writes/legacy/findOneAndUpdate-errorLabels.json +++ b/test/retryable_writes/legacy/findOneAndUpdate-errorLabels.json @@ -119,6 +119,109 @@ ] } 
} + }, + { + "description": "FindOneAndUpdate succeeds after PrimarySteppedDown", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + } + }, + "outcome": { + "result": { + "_id": 1, + "x": 11 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndUpdate succeeds after WriteConcernError ShutdownInProgress", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + }, + "operation": { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + } + }, + "outcome": { + "result": { + "_id": 1, + "x": 11 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } } ] } diff --git a/test/retryable_writes/legacy/findOneAndUpdate-serverErrors.json b/test/retryable_writes/legacy/findOneAndUpdate-serverErrors.json index e83a610615..7162a38de0 100644 --- a/test/retryable_writes/legacy/findOneAndUpdate-serverErrors.json +++ b/test/retryable_writes/legacy/findOneAndUpdate-serverErrors.json @@ -25,109 +25,6 @@ } ], "tests": [ - { - "description": "FindOneAndUpdate succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorCode": 189, - "errorLabels": [ - "RetryableWriteError" - ] - 
} - }, - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - } - }, - "outcome": { - "result": { - "_id": 1, - "x": 11 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndUpdate succeeds after WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - } - }, - "outcome": { - "result": { - "_id": 1, - "x": 11 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, { "description": "FindOneAndUpdate fails with a RetryableWriteError label after two connection failures", "failPoint": { diff --git a/test/retryable_writes/legacy/insertMany-errorLabels.json b/test/retryable_writes/legacy/insertMany-errorLabels.json index 65fd377fa6..5afb7fc5d7 100644 --- a/test/retryable_writes/legacy/insertMany-errorLabels.json +++ b/test/retryable_writes/legacy/insertMany-errorLabels.json @@ -125,6 +125,129 @@ ] } } + }, + { + "description": "InsertMany succeeds after PrimarySteppedDown", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "options": { + "ordered": true + } + } + }, + "outcome": { + 
"result": { + "insertedIds": { + "0": 2, + "1": 3 + } + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + }, + { + "description": "InsertMany succeeds after WriteConcernError ShutdownInProgress", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + }, + "operation": { + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "options": { + "ordered": true + } + } + }, + "outcome": { + "result": { + "insertedIds": { + "0": 2, + "1": 3 + } + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } } ] } diff --git a/test/retryable_writes/legacy/insertMany-serverErrors.json b/test/retryable_writes/legacy/insertMany-serverErrors.json index fe8dbf4a62..8b55f13538 100644 --- a/test/retryable_writes/legacy/insertMany-serverErrors.json +++ b/test/retryable_writes/legacy/insertMany-serverErrors.json @@ -21,129 +21,6 @@ } ], "tests": [ - { - "description": "InsertMany succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 189, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "insertedIds": { - "0": 2, - "1": 3 - } - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": 
"InsertMany succeeds after WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "insertedIds": { - "0": 2, - "1": 3 - } - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, { "description": "InsertMany fails with a RetryableWriteError label after two connection failures", "failPoint": { diff --git a/test/retryable_writes/legacy/insertOne-errorLabels.json b/test/retryable_writes/legacy/insertOne-errorLabels.json index d90ac5dfbd..74072d5a86 100644 --- a/test/retryable_writes/legacy/insertOne-errorLabels.json +++ b/test/retryable_writes/legacy/insertOne-errorLabels.json @@ -86,6 +86,716 @@ "data": [] } } + }, + { + "description": "InsertOne succeeds after NotWritablePrimary", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 10107, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "result": { + "insertedId": 1 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + }, + { + "description": "InsertOne succeeds after NotPrimaryOrSecondary", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 13436, + "errorLabels": [ + 
"RetryableWriteError" + ], + "closeConnection": false + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "result": { + "insertedId": 1 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + }, + { + "description": "InsertOne succeeds after NotPrimaryNoSecondaryOk", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 13435, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "result": { + "insertedId": 1 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + }, + { + "description": "InsertOne succeeds after InterruptedDueToReplStateChange", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11602, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "result": { + "insertedId": 1 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + }, + { + "description": "InsertOne succeeds after InterruptedAtShutdown", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11600, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "result": { + "insertedId": 1 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + }, + { + "description": "InsertOne succeeds 
after PrimarySteppedDown", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "result": { + "insertedId": 1 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + }, + { + "description": "InsertOne succeeds after ShutdownInProgress", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 91, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "result": { + "insertedId": 1 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + }, + { + "description": "InsertOne succeeds after HostNotFound", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "result": { + "insertedId": 1 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + }, + { + "description": "InsertOne succeeds after HostUnreachable", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 6, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + 
"outcome": { + "result": { + "insertedId": 1 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + }, + { + "description": "InsertOne succeeds after SocketException", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 9001, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "result": { + "insertedId": 1 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + }, + { + "description": "InsertOne succeeds after NetworkTimeout", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 89, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "result": { + "insertedId": 1 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + }, + { + "description": "InsertOne succeeds after ExceededTimeLimit", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 262, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "result": { + "insertedId": 1 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + }, + { + "description": "InsertOne succeeds after WriteConcernError InterruptedAtShutdown", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + 
"RetryableWriteError" + ], + "writeConcernError": { + "code": 11600, + "errmsg": "Replication is being shut down" + } + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "result": { + "insertedId": 1 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + }, + { + "description": "InsertOne succeeds after WriteConcernError InterruptedDueToReplStateChange", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 11602, + "errmsg": "Replication is being shut down" + } + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "result": { + "insertedId": 1 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + }, + { + "description": "InsertOne succeeds after WriteConcernError PrimarySteppedDown", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 189, + "errmsg": "Replication is being shut down" + } + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "result": { + "insertedId": 1 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + }, + { + "description": "InsertOne succeeds after WriteConcernError ShutdownInProgress", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + }, + "operation": { + "name": "insertOne", + 
"arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "result": { + "insertedId": 1 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + }, + { + "description": "InsertOne fails after multiple retryable writeConcernErrors", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } } ] } diff --git a/test/retryable_writes/legacy/insertOne-serverErrors.json b/test/retryable_writes/legacy/insertOne-serverErrors.json index 5179a6ab75..51ce59a4b9 100644 --- a/test/retryable_writes/legacy/insertOne-serverErrors.json +++ b/test/retryable_writes/legacy/insertOne-serverErrors.json @@ -117,594 +117,6 @@ } } }, - { - "description": "InsertOne succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 10107, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 
13436, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 13435, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 11602, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 11600, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - 
"document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 189, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 91, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 7, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, 
- { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 6, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 9001, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 89, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after ExceededTimeLimit", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - 
}, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 262, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, { "description": "InsertOne fails after Interrupted", "failPoint": { @@ -750,264 +162,6 @@ } } }, - { - "description": "InsertOne succeeds after WriteConcernError InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 11600, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after WriteConcernError InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 11602, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after WriteConcernError PrimarySteppedDown", - 
"failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 189, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne fails after multiple retryable writeConcernErrors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsContain": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, { "description": "InsertOne fails after 
WriteConcernError Interrupted", "failPoint": { diff --git a/test/retryable_writes/legacy/replaceOne-errorLabels.json b/test/retryable_writes/legacy/replaceOne-errorLabels.json index 6029b875dc..67218b790e 100644 --- a/test/retryable_writes/legacy/replaceOne-errorLabels.json +++ b/test/retryable_writes/legacy/replaceOne-errorLabels.json @@ -116,6 +116,107 @@ ] } } + }, + { + "description": "ReplaceOne succeeds after PrimarySteppedDown", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + } + }, + "outcome": { + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "ReplaceOne succeeds after WriteConcernError ShutdownInProgress", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + }, + "operation": { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + } + }, + "outcome": { + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } } ] } diff --git a/test/retryable_writes/legacy/replaceOne-serverErrors.json b/test/retryable_writes/legacy/replaceOne-serverErrors.json index 6b35722e12..9ffb83452b 100644 --- a/test/retryable_writes/legacy/replaceOne-serverErrors.json +++ 
b/test/retryable_writes/legacy/replaceOne-serverErrors.json @@ -25,107 +25,6 @@ } ], "tests": [ - { - "description": "ReplaceOne succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorCode": 189, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "_id": 1, - "x": 111 - } - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 111 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "ReplaceOne succeeds after WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "_id": 1, - "x": 111 - } - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 111 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, { "description": "ReplaceOne fails with a RetryableWriteError label after two connection failures", "failPoint": { diff --git a/test/retryable_writes/legacy/updateOne-errorLabels.json b/test/retryable_writes/legacy/updateOne-errorLabels.json index 5bd00cde90..b9ba9fdb62 100644 --- a/test/retryable_writes/legacy/updateOne-errorLabels.json +++ b/test/retryable_writes/legacy/updateOne-errorLabels.json @@ -118,6 +118,109 @@ ] } } + }, + { + "description": "UpdateOne succeeds after PrimarySteppedDown", + "failPoint": { + "configureFailPoint": "failCommand", 
+ "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + "outcome": { + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "UpdateOne succeeds after WriteConcernError ShutdownInProgress", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + }, + "operation": { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + "outcome": { + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } } ] } diff --git a/test/retryable_writes/legacy/updateOne-serverErrors.json b/test/retryable_writes/legacy/updateOne-serverErrors.json index cf274f57e0..300f135229 100644 --- a/test/retryable_writes/legacy/updateOne-serverErrors.json +++ b/test/retryable_writes/legacy/updateOne-serverErrors.json @@ -25,109 +25,6 @@ } ], "tests": [ - { - "description": "UpdateOne succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorCode": 189, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "outcome": { - "result": { - 
"matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "UpdateOne succeeds after WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, { "description": "UpdateOne fails with a RetryableWriteError label after two connection failures", "failPoint": { diff --git a/test/retryable_writes/unified/bulkWrite-serverErrors.json b/test/retryable_writes/unified/bulkWrite-serverErrors.json index 23cf2869a6..737a1555e8 100644 --- a/test/retryable_writes/unified/bulkWrite-serverErrors.json +++ b/test/retryable_writes/unified/bulkWrite-serverErrors.json @@ -3,10 +3,16 @@ "schemaVersion": "1.0", "runOnRequirements": [ { - "minServerVersion": "3.6", + "minServerVersion": "4.0", "topologies": [ "replicaset" ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded" + ] } ], "createEntities": [ @@ -55,16 +61,7 @@ "description": "BulkWrite succeeds after retryable writeConcernError in first batch", "runOnRequirements": [ { - "minServerVersion": "4.0", - "topologies": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topologies": [ - "sharded-replicaset" - ] + "minServerVersion": "4.3.1" } ], "operations": [ diff --git a/test/retryable_writes/unified/insertOne-serverErrors.json 
b/test/retryable_writes/unified/insertOne-serverErrors.json index 77245a8197..89827fcf3f 100644 --- a/test/retryable_writes/unified/insertOne-serverErrors.json +++ b/test/retryable_writes/unified/insertOne-serverErrors.json @@ -3,10 +3,16 @@ "schemaVersion": "1.0", "runOnRequirements": [ { - "minServerVersion": "3.6", + "minServerVersion": "4.0", "topologies": [ "replicaset" ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded" + ] } ], "createEntities": [ @@ -55,16 +61,7 @@ "description": "InsertOne succeeds after retryable writeConcernError", "runOnRequirements": [ { - "minServerVersion": "4.0", - "topologies": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topologies": [ - "sharded-replicaset" - ] + "minServerVersion": "4.3.1" } ], "operations": [ @@ -168,6 +165,309 @@ ] } ] + }, + { + "description": "RetryableWriteError label is added based on top-level code in pre-4.4 server response", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 189 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + 
"databaseName": "retryable-writes-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "RetryableWriteError label is added based on writeConcernError in pre-4.4 mongod response", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "RetryableWriteError label is not added based on writeConcernError in pre-4.4 mongos response", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "topologies": [ + "sharded" + ] + } + ], + 
"operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] } ] } diff --git a/test/server_selection_logging/load-balanced.json b/test/server_selection_logging/load-balanced.json new file mode 100644 index 0000000000..5855c4e991 --- /dev/null +++ b/test/server_selection_logging/load-balanced.json @@ -0,0 +1,107 @@ +{ + "description": "server-selection-logging", + "schemaVersion": "1.13", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "uriOptions": { + "heartbeatFrequencyMS": 500 + }, + "observeLogMessages": { + "serverSelection": "debug" + }, + "observeEvents": [ + "serverDescriptionChangedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "server-selection" + } + } + ], + "tests": [ + { + "description": "A successful operation - load balanced cluster", + 
"operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "LoadBalancer" + } + } + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/server_selection_logging/operation-id.json b/test/server_selection_logging/operation-id.json new file mode 100644 index 0000000000..23af7a8a22 --- /dev/null +++ b/test/server_selection_logging/operation-id.json @@ -0,0 +1,229 @@ +{ + "description": "operation-id", + "schemaVersion": "1.14", + "runOnRequirements": [ + { + "topologies": [ + "single" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "uriOptions": { + "retryWrites": false, + "heartbeatFrequencyMS": 500, + "appName": "loggingClient", + "serverSelectionTimeoutMS": 2000 + }, + "observeLogMessages": { + "serverSelection": "debug" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "topologyDescriptionChangedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "server-selection" + } + }, + { + "client": { + "id": "failPointClient" + } + } + ], + "tests": [ + { + "description": "Successful bulkWrite 
operation: log messages have operationIds", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "x": 1 + } + } + } + ] + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "insert" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "insert" + } + } + ] + } + ] + }, + { + "description": "Failed bulkWrite operation: log messages have operationIds", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "ismaster" + ], + "appName": "loggingClient", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "x": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "operationId": { 
+ "$$type": [ + "int", + "long" + ] + }, + "operation": "insert" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "insert" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection failed", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/server_selection_logging/replica-set.json b/test/server_selection_logging/replica-set.json new file mode 100644 index 0000000000..830b1ea51a --- /dev/null +++ b/test/server_selection_logging/replica-set.json @@ -0,0 +1,228 @@ +{ + "description": "replica-set-logging", + "schemaVersion": "1.14", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "uriOptions": { + "retryWrites": false, + "heartbeatFrequencyMS": 500, + "serverSelectionTimeoutMS": 2000 + }, + "observeLogMessages": { + "serverSelection": "debug" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "topologyDescriptionChangedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "server-selection" + } + }, + { + "client": { + "id": "failPointClient" + } + }, + { + "collection": { + "id": "unsatisfiableRPColl", + "database": "database", + "collectionName": "unsatisfiableRPColl", + "collectionOptions": { + "readPreference": { + "mode": "Secondary", + "tagSets": [ + { + "nonexistenttag": "a" + } + ] + } + } + } + } + ], + "tests": [ + { + "description": "A successful operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + 
"topologyDescriptionChangedEvent": {} + }, + "count": 4 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "Server selection fails due to unsatisfiable read preference", + "runOnRequirements": [ + { + "minServerVersion": "4.0" + } + ], + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 4 + } + }, + { + "name": "find", + "object": "unsatisfiableRPColl", + "arguments": { + "filter": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "find", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", + "selector": { + "$$exists": true + }, + "operation": "find", + "topologyDescription": { + "$$exists": true + }, + "remainingTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": 
"serverSelection", + "data": { + "message": "Server selection failed", + "selector": { + "$$exists": true + }, + "operation": "find", + "topologyDescription": { + "$$exists": true + }, + "failure": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/server_selection_logging/sharded.json b/test/server_selection_logging/sharded.json new file mode 100644 index 0000000000..346c050f9e --- /dev/null +++ b/test/server_selection_logging/sharded.json @@ -0,0 +1,237 @@ +{ + "description": "server-selection-logging", + "schemaVersion": "1.14", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "uriOptions": { + "retryWrites": false, + "heartbeatFrequencyMS": 500, + "appName": "loggingClient", + "serverSelectionTimeoutMS": 2000 + }, + "observeLogMessages": { + "serverSelection": "debug" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "topologyDescriptionChangedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "server-selection" + } + }, + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + "tests": [ + { + "description": "A successful operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + 
} + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "Failure due to unreachable server", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "ismaster" + ], + "appName": "loggingClient", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + }, + "remainingTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection failed", + "selector": { + "$$exists": true + }, + "operation": 
"insert", + "topologyDescription": { + "$$exists": true + }, + "failure": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/server_selection_logging/standalone.json b/test/server_selection_logging/standalone.json new file mode 100644 index 0000000000..3152d0bbf3 --- /dev/null +++ b/test/server_selection_logging/standalone.json @@ -0,0 +1,1161 @@ +{ + "description": "standalone-logging", + "schemaVersion": "1.14", + "runOnRequirements": [ + { + "topologies": [ + "single" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "uriOptions": { + "retryWrites": false, + "heartbeatFrequencyMS": 500, + "appName": "loggingClient", + "serverSelectionTimeoutMS": 2000 + }, + "observeLogMessages": { + "serverSelection": "debug" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "topologyDescriptionChangedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "server-selection" + } + }, + { + "client": { + "id": "failPointClient" + } + } + ], + "initialData": [ + { + "collectionName": "server-selection", + "databaseName": "logging-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful insert operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "insert", + 
"topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "Failure due to unreachable server", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "ismaster" + ], + "appName": "loggingClient", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + }, + "remainingTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection failed", + "selector": { 
+ "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + }, + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "A successful find operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": { + "x": 1 + } + } + } + + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "find", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "find", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "A successful findAndModify operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": { + "x": 1 + }, + "replacement": { + "x": 11 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "findAndModify", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", 
+ "selector": { + "$$exists": true + }, + "operation": "findAndModify", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "A successful find and getMore operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "batchSize": 3 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "find", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "find", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "getMore", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "getMore", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "A successful aggregate operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": 
"client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ] + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "aggregate", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "aggregate", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "A successful count operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "count", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "count", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "A successful distinct operation", + "operations": [ + { + 
"name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "distinct", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "distinct", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "Successful collection management operations", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "foo" + } + }, + { + "name": "listCollections", + "object": "database" + }, + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "foo" + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "create", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "create", + "topologyDescription": { + 
"$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "listCollections", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "listCollections", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "drop", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "drop", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "Successful index operations", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "listIndexes", + "object": "collection" + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server 
selection started", + "selector": { + "$$exists": true + }, + "operation": "createIndexes", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "createIndexes", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "listIndexes", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "listIndexes", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "dropIndexes", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "dropIndexes", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "A successful update operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": 
{ + "filter": { + "x": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "update", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "update", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "A successful delete operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": { + "x": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "delete", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "delete", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json b/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json index 3f500acdc6..8e459115c1 100644 --- 
a/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json +++ b/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json @@ -10,5 +10,6 @@ "loadBalanced": true, "ssl": true, "directConnection": false - } + }, + "ping": true } diff --git a/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json b/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json index f9719e760d..39bff5a23b 100644 --- a/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json +++ b/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json @@ -9,5 +9,6 @@ "options": { "loadBalanced": true, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json b/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json index a18360ea64..474a314fd7 100644 --- a/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json +++ b/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json @@ -10,5 +10,6 @@ "loadBalanced": true, "srvMaxHosts": 0, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json b/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json index bd85418117..dfc90dc96d 100644 --- a/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json +++ b/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json @@ -10,5 +10,6 @@ "loadBalanced": true, "srvMaxHosts": 0, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/direct-connection-false.json b/test/srv_seedlist/replica-set/direct-connection-false.json index 1d57bdcb3c..3f14ff94e7 100644 --- a/test/srv_seedlist/replica-set/direct-connection-false.json +++ b/test/srv_seedlist/replica-set/direct-connection-false.json @@ -11,5 +11,6 @@ "options": { "ssl": true, "directConnection": false - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/encoded-userinfo-and-db.json b/test/srv_seedlist/replica-set/encoded-userinfo-and-db.json index 70c6c23a39..4493628be9 100644 --- 
a/test/srv_seedlist/replica-set/encoded-userinfo-and-db.json +++ b/test/srv_seedlist/replica-set/encoded-userinfo-and-db.json @@ -17,5 +17,6 @@ "password": "$4to@L8=MC", "db": "mydb?" }, + "ping": false, "comment": "Encoded user, pass, and DB parse correctly" } diff --git a/test/srv_seedlist/replica-set/loadBalanced-false-txt.json b/test/srv_seedlist/replica-set/loadBalanced-false-txt.json index fd2e565c7b..682d32a742 100644 --- a/test/srv_seedlist/replica-set/loadBalanced-false-txt.json +++ b/test/srv_seedlist/replica-set/loadBalanced-false-txt.json @@ -11,5 +11,6 @@ "options": { "loadBalanced": false, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/longer-parent-in-return.json b/test/srv_seedlist/replica-set/longer-parent-in-return.json index 9a8267eaeb..ebe3fe1e77 100644 --- a/test/srv_seedlist/replica-set/longer-parent-in-return.json +++ b/test/srv_seedlist/replica-set/longer-parent-in-return.json @@ -12,5 +12,6 @@ "replicaSet": "repl0", "ssl": true }, + "ping": true, "comment": "Is correct, as returned host name shared the URI root \"test.build.10gen.cc\"." 
} diff --git a/test/srv_seedlist/replica-set/one-result-default-port.json b/test/srv_seedlist/replica-set/one-result-default-port.json index cebb3b1ec3..9f7733de80 100644 --- a/test/srv_seedlist/replica-set/one-result-default-port.json +++ b/test/srv_seedlist/replica-set/one-result-default-port.json @@ -11,5 +11,6 @@ "options": { "replicaSet": "repl0", "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/one-txt-record-multiple-strings.json b/test/srv_seedlist/replica-set/one-txt-record-multiple-strings.json index 622668c351..1d740b1b59 100644 --- a/test/srv_seedlist/replica-set/one-txt-record-multiple-strings.json +++ b/test/srv_seedlist/replica-set/one-txt-record-multiple-strings.json @@ -11,5 +11,6 @@ "options": { "replicaSet": "repl0", "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/one-txt-record.json b/test/srv_seedlist/replica-set/one-txt-record.json index 2385021ad4..ecdb0a7e2a 100644 --- a/test/srv_seedlist/replica-set/one-txt-record.json +++ b/test/srv_seedlist/replica-set/one-txt-record.json @@ -12,5 +12,6 @@ "replicaSet": "repl0", "authSource": "thisDB", "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/srv-service-name.json b/test/srv_seedlist/replica-set/srv-service-name.json index ec36cdbb00..e320c2ca3e 100644 --- a/test/srv_seedlist/replica-set/srv-service-name.json +++ b/test/srv_seedlist/replica-set/srv-service-name.json @@ -12,5 +12,6 @@ "options": { "ssl": true, "srvServiceName": "customname" - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-equal_to_srv_records.json b/test/srv_seedlist/replica-set/srvMaxHosts-equal_to_srv_records.json index d9765ac663..70edacfd06 100644 --- a/test/srv_seedlist/replica-set/srvMaxHosts-equal_to_srv_records.json +++ b/test/srv_seedlist/replica-set/srvMaxHosts-equal_to_srv_records.json @@ -13,5 +13,6 @@ "options": { "srvMaxHosts": 2, "ssl": true - } + }, + "ping": true } diff --git 
a/test/srv_seedlist/replica-set/srvMaxHosts-greater_than_srv_records.json b/test/srv_seedlist/replica-set/srvMaxHosts-greater_than_srv_records.json index 494bb87687..72540ed408 100644 --- a/test/srv_seedlist/replica-set/srvMaxHosts-greater_than_srv_records.json +++ b/test/srv_seedlist/replica-set/srvMaxHosts-greater_than_srv_records.json @@ -12,5 +12,6 @@ "options": { "srvMaxHosts": 3, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-less_than_srv_records.json b/test/srv_seedlist/replica-set/srvMaxHosts-less_than_srv_records.json index 66a5e90dad..a9d6dd6fd9 100644 --- a/test/srv_seedlist/replica-set/srvMaxHosts-less_than_srv_records.json +++ b/test/srv_seedlist/replica-set/srvMaxHosts-less_than_srv_records.json @@ -9,5 +9,6 @@ "options": { "srvMaxHosts": 1, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-zero-txt.json b/test/srv_seedlist/replica-set/srvMaxHosts-zero-txt.json index 241a901c64..e232edb9eb 100644 --- a/test/srv_seedlist/replica-set/srvMaxHosts-zero-txt.json +++ b/test/srv_seedlist/replica-set/srvMaxHosts-zero-txt.json @@ -13,5 +13,6 @@ "replicaSet": "repl0", "srvMaxHosts": 0, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-zero.json b/test/srv_seedlist/replica-set/srvMaxHosts-zero.json index c68610a201..3421a35a3d 100644 --- a/test/srv_seedlist/replica-set/srvMaxHosts-zero.json +++ b/test/srv_seedlist/replica-set/srvMaxHosts-zero.json @@ -13,5 +13,6 @@ "replicaSet": "repl0", "srvMaxHosts": 0, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/two-results-default-port.json b/test/srv_seedlist/replica-set/two-results-default-port.json index 66028310a6..43efcc6310 100644 --- a/test/srv_seedlist/replica-set/two-results-default-port.json +++ b/test/srv_seedlist/replica-set/two-results-default-port.json @@ -12,5 +12,6 @@ "options": { "replicaSet": "repl0", "ssl": true - } + }, + "ping": true } 
diff --git a/test/srv_seedlist/replica-set/two-results-nonstandard-port.json b/test/srv_seedlist/replica-set/two-results-nonstandard-port.json index 4900f7cff1..f6e8e415a7 100644 --- a/test/srv_seedlist/replica-set/two-results-nonstandard-port.json +++ b/test/srv_seedlist/replica-set/two-results-nonstandard-port.json @@ -12,5 +12,6 @@ "options": { "replicaSet": "repl0", "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/txt-record-with-overridden-ssl-option.json b/test/srv_seedlist/replica-set/txt-record-with-overridden-ssl-option.json index 0ebc737bd5..3d84cfe446 100644 --- a/test/srv_seedlist/replica-set/txt-record-with-overridden-ssl-option.json +++ b/test/srv_seedlist/replica-set/txt-record-with-overridden-ssl-option.json @@ -12,5 +12,6 @@ "replicaSet": "repl0", "authSource": "thisDB", "ssl": false - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/txt-record-with-overridden-uri-option.json b/test/srv_seedlist/replica-set/txt-record-with-overridden-uri-option.json index 2626ba6083..1a5a240680 100644 --- a/test/srv_seedlist/replica-set/txt-record-with-overridden-uri-option.json +++ b/test/srv_seedlist/replica-set/txt-record-with-overridden-uri-option.json @@ -12,5 +12,6 @@ "replicaSet": "repl0", "authSource": "otherDB", "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/uri-with-admin-database.json b/test/srv_seedlist/replica-set/uri-with-admin-database.json index 32710d75f7..c5513a0dad 100644 --- a/test/srv_seedlist/replica-set/uri-with-admin-database.json +++ b/test/srv_seedlist/replica-set/uri-with-admin-database.json @@ -15,5 +15,6 @@ }, "parsed_options": { "auth_database": "adminDB" - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/uri-with-auth.json b/test/srv_seedlist/replica-set/uri-with-auth.json index cc7257d85b..872f997cc7 100644 --- a/test/srv_seedlist/replica-set/uri-with-auth.json +++ b/test/srv_seedlist/replica-set/uri-with-auth.json @@ -9,9 +9,14 @@ 
"localhost:27018", "localhost:27019" ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, "parsed_options": { "user": "auser", "password": "apass" }, + "ping": false, "comment": "Should preserve auth credentials" } diff --git a/test/srv_seedlist/replica-set/uri-with-uppercase-hostname.json b/test/srv_seedlist/replica-set/uri-with-uppercase-hostname.json new file mode 100644 index 0000000000..40579aa44c --- /dev/null +++ b/test/srv_seedlist/replica-set/uri-with-uppercase-hostname.json @@ -0,0 +1,16 @@ +{ + "uri": "mongodb+srv://TEST1.TEST.BUILD.10GEN.CC", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "ssl": true + }, + "ping": true +} diff --git a/test/srv_seedlist/sharded/srvMaxHosts-equal_to_srv_records.json b/test/srv_seedlist/sharded/srvMaxHosts-equal_to_srv_records.json index 46390726f0..7d2f9a6bf8 100644 --- a/test/srv_seedlist/sharded/srvMaxHosts-equal_to_srv_records.json +++ b/test/srv_seedlist/sharded/srvMaxHosts-equal_to_srv_records.json @@ -12,5 +12,6 @@ "options": { "srvMaxHosts": 2, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/sharded/srvMaxHosts-greater_than_srv_records.json b/test/srv_seedlist/sharded/srvMaxHosts-greater_than_srv_records.json index e02d72bf28..452c7b54db 100644 --- a/test/srv_seedlist/sharded/srvMaxHosts-greater_than_srv_records.json +++ b/test/srv_seedlist/sharded/srvMaxHosts-greater_than_srv_records.json @@ -11,5 +11,6 @@ "options": { "srvMaxHosts": 3, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/sharded/srvMaxHosts-less_than_srv_records.json b/test/srv_seedlist/sharded/srvMaxHosts-less_than_srv_records.json index fdcc1692c0..cd3bf65117 100644 --- a/test/srv_seedlist/sharded/srvMaxHosts-less_than_srv_records.json +++ b/test/srv_seedlist/sharded/srvMaxHosts-less_than_srv_records.json @@ -5,5 +5,6 @@ "options": { "srvMaxHosts": 1, 
"ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/sharded/srvMaxHosts-zero.json b/test/srv_seedlist/sharded/srvMaxHosts-zero.json index 10ab9e656d..f289628c9c 100644 --- a/test/srv_seedlist/sharded/srvMaxHosts-zero.json +++ b/test/srv_seedlist/sharded/srvMaxHosts-zero.json @@ -11,5 +11,6 @@ "options": { "srvMaxHosts": 0, "ssl": true - } + }, + "ping": true } diff --git a/test/test_auth.py b/test/test_auth.py index 2240a4b5b9..74089bd68e 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -269,7 +269,6 @@ def setUpClass(cls): raise SkipTest("Must set SASL_HOST, SASL_USER, and SASL_PASS to test SASL") def test_sasl_plain(self): - client = MongoClient( SASL_HOST, SASL_PORT, @@ -344,6 +343,7 @@ def tearDown(self): client_context.drop_user("pymongo_test", "user") super().tearDown() + @client_context.require_no_fips def test_scram_sha1(self): host, port = client_context.host, client_context.port @@ -405,6 +405,7 @@ def test_scram_skip_empty_exchange(self): else: self.assertEqual(started, ["saslStart", "saslContinue", "saslContinue"]) + @client_context.require_no_fips def test_scram(self): # Step 1: create users client_context.create_user( diff --git a/test/test_auth_spec.py b/test/test_auth_spec.py index 4976a6dd49..6cd037e204 100644 --- a/test/test_auth_spec.py +++ b/test/test_auth_spec.py @@ -19,6 +19,7 @@ import json import os import sys +import warnings sys.path[0:0] = [""] @@ -26,6 +27,7 @@ from test.unified_format import generate_test_classes from pymongo import MongoClient +from pymongo.auth_oidc import OIDCCallback _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "auth") @@ -34,6 +36,11 @@ class TestAuthSpec(unittest.TestCase): pass +class SampleHumanCallback(OIDCCallback): + def fetch(self, context): + pass + + def create_test(test_case): def run_test(self): uri = test_case["uri"] @@ -41,15 +48,11 @@ def run_test(self): credential = test_case.get("credential") if not valid: - self.assertRaises(Exception, 
MongoClient, uri, connect=False) + with warnings.catch_warnings(): + warnings.simplefilter("default") + self.assertRaises(Exception, MongoClient, uri, connect=False) else: - props = {} - if credential: - props = credential["mechanism_properties"] or {} - if props.get("REQUEST_TOKEN_CALLBACK"): - props["request_token_callback"] = lambda x, y: 1 - del props["REQUEST_TOKEN_CALLBACK"] - client = MongoClient(uri, connect=False, authmechanismproperties=props) + client = MongoClient(uri, connect=False) credentials = client.options.pool_options._credentials if credential is None: self.assertIsNone(credentials) @@ -65,27 +68,8 @@ def run_test(self): expected = credential["mechanism_properties"] if expected is not None: actual = credentials.mechanism_properties - for key, _val in expected.items(): - if "SERVICE_NAME" in expected: - self.assertEqual(actual.service_name, expected["SERVICE_NAME"]) - elif "CANONICALIZE_HOST_NAME" in expected: - self.assertEqual( - actual.canonicalize_host_name, expected["CANONICALIZE_HOST_NAME"] - ) - elif "SERVICE_REALM" in expected: - self.assertEqual(actual.service_realm, expected["SERVICE_REALM"]) - elif "AWS_SESSION_TOKEN" in expected: - self.assertEqual( - actual.aws_session_token, expected["AWS_SESSION_TOKEN"] - ) - elif "PROVIDER_NAME" in expected: - self.assertEqual(actual.provider_name, expected["PROVIDER_NAME"]) - elif "request_token_callback" in expected: - self.assertEqual( - actual.request_token_callback, expected["request_token_callback"] - ) - else: - self.fail(f"Unhandled property: {key}") + for key, value in expected.items(): + self.assertEqual(getattr(actual, key.lower()), value) else: if credential["mechanism"] == "MONGODB-AWS": self.assertIsNone(credentials.mechanism_properties.aws_session_token) diff --git a/test/test_binary.py b/test/test_binary.py index fafb6da162..517d633aa4 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -22,6 +22,7 @@ import pickle import sys import uuid +from typing import Any 
sys.path[0:0] = [""] @@ -276,7 +277,6 @@ def test_legacy_csharp_uuid_roundtrip(self): client_context.client.pymongo_test.drop_collection("csharp_uuid") def test_uri_to_uuid(self): - uri = "mongodb://foo/?uuidrepresentation=csharpLegacy" client = MongoClient(uri, connect=False) self.assertEqual(client.pymongo_test.test.codec_options.uuid_representation, CSHARP_LEGACY) @@ -496,7 +496,7 @@ def test_encoding_4(self): # Implicit encoding prose test #5 def test_encoding_5(self): with self.assertRaises(ValueError): - self._test_encoding("unspecifed", "dummy", -1) + self._test_encoding("unspecified", "dummy", -1) def _test_decoding( self, diff --git a/test/test_bson.py b/test/test_bson.py index 749c63bdf3..fec84090d2 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -23,6 +23,7 @@ import os import pickle import re +import struct import sys import tempfile import uuid @@ -48,7 +49,7 @@ encode, is_valid, ) -from bson.binary import Binary, UuidRepresentation +from bson.binary import USER_DEFINED_SUBTYPE, Binary, UuidRepresentation from bson.code import Code from bson.codec_options import CodecOptions, DatetimeConversion from bson.datetime_ms import _DATETIME_ERROR_SUGGESTION @@ -489,6 +490,33 @@ def test_basic_encode(self): b"\x00", ) + def test_bad_code(self): + # Assert that decoding invalid Code with scope does not include a field name. 
+ def generate_payload(length: int) -> bytes: + string_size = length - 0x1E + + return bytes.fromhex( + struct.pack(":@" + s for s in normal_hosts] + multi_host = ( + "host.cosmos.azure.com,host.docdb.amazonaws.com,host.docdb-elastic.amazonaws.com" + ) + with self.assertLogs("pymongo", level="INFO") as cm: + for host in normal_hosts: + MongoClient(host) + for host in srv_hosts: + mock_get_hosts.return_value = [(host, 1)] + MongoClient(host) + MongoClient(multi_host) + logs = [record.message for record in cm.records if record.name == "pymongo.client"] + self.assertEqual(len(logs), 7) + + @patch("pymongo.srv_resolver._SrvResolver.get_hosts") + def test_detected_environment_warning(self, mock_get_hosts): + with self._caplog.at_level(logging.WARN): + normal_hosts = [ + "host.cosmos.azure.com", + "host.docdb.amazonaws.com", + "host.docdb-elastic.amazonaws.com", + ] + srv_hosts = ["mongodb+srv://:@" + s for s in normal_hosts] + multi_host = ( + "host.cosmos.azure.com,host.docdb.amazonaws.com,host.docdb-elastic.amazonaws.com" + ) + for host in normal_hosts: + with self.assertWarns(UserWarning): + MongoClient(host) + for host in srv_hosts: + mock_get_hosts.return_value = [(host, 1)] + with self.assertWarns(UserWarning): + MongoClient(host) + with self.assertWarns(UserWarning): + MongoClient(multi_host) + class TestClient(IntegrationTest): def test_multiple_uris(self): @@ -541,7 +611,7 @@ def test_max_idle_time_reaper_default(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper doesn't remove connections when maxIdleTimeMS not set client = rs_or_single_client() - server = client._get_topology().select_server(readable_server_selector) + server = client._get_topology().select_server(readable_server_selector, _Op.TEST) with server._pool.checkout() as conn: pass self.assertEqual(1, len(server._pool.conns)) @@ -552,7 +622,7 @@ def test_max_idle_time_reaper_removes_stale_minPoolSize(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper removes idle 
socket and replaces it with a new one client = rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1) - server = client._get_topology().select_server(readable_server_selector) + server = client._get_topology().select_server(readable_server_selector, _Op.TEST) with server._pool.checkout() as conn: pass # When the reaper runs at the same time as the get_socket, two @@ -566,7 +636,7 @@ def test_max_idle_time_reaper_does_not_exceed_maxPoolSize(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper respects maxPoolSize when adding new connections. client = rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1, maxPoolSize=1) - server = client._get_topology().select_server(readable_server_selector) + server = client._get_topology().select_server(readable_server_selector, _Op.TEST) with server._pool.checkout() as conn: pass # When the reaper runs at the same time as the get_socket, @@ -580,7 +650,7 @@ def test_max_idle_time_reaper_removes_stale(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper has removed idle socket and NOT replaced it client = rs_or_single_client(maxIdleTimeMS=500) - server = client._get_topology().select_server(readable_server_selector) + server = client._get_topology().select_server(readable_server_selector, _Op.TEST) with server._pool.checkout() as conn_one: pass # Assert that the pool does not close connections prematurely. 
@@ -597,14 +667,15 @@ def test_max_idle_time_reaper_removes_stale(self): def test_min_pool_size(self): with client_knobs(kill_cursor_frequency=0.1): client = rs_or_single_client() - server = client._get_topology().select_server(readable_server_selector) + server = client._get_topology().select_server(readable_server_selector, _Op.TEST) self.assertEqual(0, len(server._pool.conns)) # Assert that pool started up at minPoolSize client = rs_or_single_client(minPoolSize=10) - server = client._get_topology().select_server(readable_server_selector) + server = client._get_topology().select_server(readable_server_selector, _Op.TEST) wait_until( - lambda: len(server._pool.conns) == 10, "pool initialized with 10 connections" + lambda: len(server._pool.conns) == 10, + "pool initialized with 10 connections", ) # Assert that if a socket is closed, a new one takes its place @@ -620,7 +691,7 @@ def test_max_idle_time_checkout(self): # Use high frequency to test _get_socket_no_auth. with client_knobs(kill_cursor_frequency=99999999): client = rs_or_single_client(maxIdleTimeMS=500) - server = client._get_topology().select_server(readable_server_selector) + server = client._get_topology().select_server(readable_server_selector, _Op.TEST) with server._pool.checkout() as conn: pass self.assertEqual(1, len(server._pool.conns)) @@ -634,7 +705,7 @@ def test_max_idle_time_checkout(self): # Test that connections are reused if maxIdleTimeMS is not set. 
client = rs_or_single_client() - server = client._get_topology().select_server(readable_server_selector) + server = client._get_topology().select_server(readable_server_selector, _Op.TEST) with server._pool.checkout() as conn: pass self.assertEqual(1, len(server._pool.conns)) @@ -945,6 +1016,7 @@ def test_bad_uri(self): MongoClient("http://localhost") @client_context.require_auth + @client_context.require_no_fips def test_auth_from_uri(self): host, port = client_context.host, client_context.port client_context.create_user("admin", "admin", "pass") @@ -1001,6 +1073,7 @@ def test_username_and_password(self): rs_or_single_client_noauth(username="ad min", password="foo").server_info() @client_context.require_auth + @client_context.require_no_fips def test_lazy_auth_raises_operation_failure(self): lazy_client = rs_or_single_client_noauth( f"mongodb://user:wrong@{client_context.host}/pymongo_test", connect=False @@ -1487,7 +1560,7 @@ def compression_settings(client): self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) - if not _HAVE_SNAPPY: + if not _have_snappy(): uri = "mongodb://localhost:27017/?compressors=snappy" client = MongoClient(uri, connect=False) opts = compression_settings(client) @@ -1502,7 +1575,7 @@ def compression_settings(client): opts = compression_settings(client) self.assertEqual(opts.compressors, ["snappy", "zlib"]) - if not _HAVE_ZSTD: + if not _have_zstd(): uri = "mongodb://localhost:27017/?compressors=zstd" client = MongoClient(uri, connect=False) opts = compression_settings(client) @@ -1682,7 +1755,6 @@ def test_process_periodic_tasks(self): with self.assertRaises(InvalidOperation): coll.insert_many([{} for _ in range(5)]) - @unittest.skipUnless(_HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") def test_service_name_from_kwargs(self): client = MongoClient( "mongodb+srv://user:password@test22.test.build.10gen.cc", @@ -1703,7 +1775,6 @@ def test_service_name_from_kwargs(self): ) 
self.assertEqual(client._topology_settings.srv_service_name, "customname") - @unittest.skipUnless(_HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") def test_srv_max_hosts_kwarg(self): client = MongoClient("mongodb+srv://test1.test.build.10gen.cc/") self.assertGreater(len(client.topology_description.server_descriptions()), 1) @@ -1714,11 +1785,6 @@ def test_srv_max_hosts_kwarg(self): ) self.assertEqual(len(client.topology_description.server_descriptions()), 2) - @unittest.skipIf(_HAVE_DNSPYTHON, "dnspython must not be installed") - def test_srv_no_dnspython_error(self): - with self.assertRaisesRegex(ConfigurationError, 'The "dnspython" module must be'): - MongoClient("mongodb+srv://test1.test.build.10gen.cc/") - @unittest.skipIf( client_context.load_balancer or client_context.serverless, "loadBalanced clients do not run SDAM", @@ -2160,7 +2226,7 @@ def test_reconnect(self): # But it can reconnect. c.revive_host("a:1") - c._get_topology().select_servers(writable_server_selector) + c._get_topology().select_servers(writable_server_selector, _Op.TEST) self.assertEqual(c.address, ("a", 1)) def _test_network_error(self, operation_callback): @@ -2183,7 +2249,7 @@ def _test_network_error(self, operation_callback): # Set host-specific information so we can test whether it is reset. 
c.set_wire_version_range("a:1", 2, 6) c.set_wire_version_range("b:2", 2, 7) - c._get_topology().select_servers(writable_server_selector) + c._get_topology().select_servers(writable_server_selector, _Op.TEST) wait_until(lambda: len(c.nodes) == 2, "connect") c.kill_host("a:1") diff --git a/test/test_collection.py b/test/test_collection.py index 4947192453..1667a3dd03 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -20,7 +20,7 @@ import sys from codecs import utf_8_decode from collections import defaultdict -from typing import Iterable, no_type_check +from typing import Any, Iterable, no_type_check from pymongo.database import Database @@ -691,7 +691,7 @@ def test_insert_one(self): db = self.db db.test.drop() - document = {"_id": 1000} + document: dict[str, Any] = {"_id": 1000} result = db.test.insert_one(document) self.assertTrue(isinstance(result, InsertOneResult)) self.assertTrue(isinstance(result.inserted_id, int)) @@ -2124,9 +2124,7 @@ def test_find_command_generation(self): None, None, ) - self.assertEqual( - cmd.to_dict(), SON([("find", "coll"), ("$dumb", 2), ("filter", {"foo": 1})]).to_dict() - ) + self.assertEqual(cmd, {"find": "coll", "$dumb": 2, "filter": {"foo": 1}}) def test_bool(self): with self.assertRaises(NotImplementedError): diff --git a/test/test_command_logging.py b/test/test_command_logging.py new file mode 100644 index 0000000000..9b2d52e66b --- /dev/null +++ b/test/test_command_logging.py @@ -0,0 +1,37 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the command monitoring unified format spec tests.""" +from __future__ import annotations + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +# Location of JSON test specifications. +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "command_logging") + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_comment.py b/test/test_comment.py index baac68be58..ffbf8d51ca 100644 --- a/test/test_comment.py +++ b/test/test_comment.py @@ -42,7 +42,12 @@ def empty(self, *args, **kwargs): class TestComment(IntegrationTest): def _test_ops( - self, helpers, already_supported, listener, db=Empty(), coll=Empty() # noqa: B008 + self, + helpers, + already_supported, + listener, + db=Empty(), # noqa: B008 + coll=Empty(), # noqa: B008 ): for h, args in helpers: c = "testing comment with " + h.__name__ @@ -84,7 +89,7 @@ def _test_ops( self.assertTrue(tested) if h not in [coll.aggregate_raw_batches]: self.assertIn( - "`comment` (optional):", + ":param comment:", h.__doc__, ) if h not in already_supported: diff --git a/test/test_connection_logging.py b/test/test_connection_logging.py new file mode 100644 index 0000000000..262ce821eb --- /dev/null +++ b/test/test_connection_logging.py @@ -0,0 +1,39 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the connection logging unified format spec tests.""" +from __future__ import annotations + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +# Location of JSON test specifications. +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "connection_logging") + + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_cmap.py b/test/test_connection_monitoring.py similarity index 93% rename from test/test_cmap.py rename to test/test_connection_monitoring.py index 59757434e4..8c70c8f9a3 100644 --- a/test/test_cmap.py +++ b/test/test_connection_monitoring.py @@ -85,7 +85,7 @@ class TestCMAP(IntegrationTest): # Location of JSON test specifications. 
- TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "cmap") + TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "connection_monitoring") # Test operations: @@ -144,7 +144,10 @@ def ready(self, op): def clear(self, op): """Run the 'clear' operation.""" - self.pool.reset() + if "interruptInUseConnections" in op: + self.pool.reset(interrupt_connections=op["interruptInUseConnections"]) + else: + self.pool.reset() def close(self, op): """Run the 'close' operation.""" @@ -173,6 +176,8 @@ def check_object(self, actual, expected): if attr == "type": continue c2s = camel_to_snake(attr) + if c2s == "interrupt_in_use_connections": + c2s = "interrupt_connections" actual_val = getattr(actual, c2s) if expected_val == 42: self.assertIsNotNone(actual_val) @@ -215,6 +220,11 @@ def set_fail_point(self, command_args): def run_scenario(self, scenario_def, test): """Run a CMAP spec test.""" + if ( + scenario_def["description"] + == "clear with interruptInUseConnections = true closes pending connections" + ): + self.skipTest("Skip pending PYTHON-4414") self.logs: list = [] self.assertEqual(scenario_def["version"], 1) self.assertIn(scenario_def["style"], ["unit", "integration"]) @@ -361,7 +371,7 @@ def test_4_subscribe_to_events(self): self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 2) client.close() - self.assertEqual(listener.event_count(PoolClearedEvent), 1) + self.assertEqual(listener.event_count(PoolClosedEvent), 1) self.assertEqual(listener.event_count(ConnectionClosedEvent), 1) def test_5_check_out_fails_connection_error(self): @@ -390,6 +400,7 @@ def mock_connect(*args, **kwargs): failed_event = listener.events[3] self.assertEqual(failed_event.reason, ConnectionCheckOutFailedReason.CONN_ERROR) + @client_context.require_no_fips def test_5_check_out_fails_auth_error(self): listener = CMAPListener() client = single_client_noauth( @@ -421,13 +432,15 @@ def assertRepr(self, obj): def test_events_repr(self): host = ("localhost", 
27017) self.assertRepr(ConnectionCheckedInEvent(host, 1)) - self.assertRepr(ConnectionCheckedOutEvent(host, 1)) + self.assertRepr(ConnectionCheckedOutEvent(host, 1, time.monotonic())) self.assertRepr( - ConnectionCheckOutFailedEvent(host, ConnectionCheckOutFailedReason.POOL_CLOSED) + ConnectionCheckOutFailedEvent( + host, ConnectionCheckOutFailedReason.POOL_CLOSED, time.monotonic() + ) ) self.assertRepr(ConnectionClosedEvent(host, 1, ConnectionClosedReason.POOL_CLOSED)) self.assertRepr(ConnectionCreatedEvent(host, 1)) - self.assertRepr(ConnectionReadyEvent(host, 1)) + self.assertRepr(ConnectionReadyEvent(host, 1, time.monotonic())) self.assertRepr(ConnectionCheckOutStartedEvent(host)) self.assertRepr(PoolCreatedEvent(host, {})) self.assertRepr(PoolClearedEvent(host)) @@ -435,18 +448,17 @@ def test_events_repr(self): self.assertRepr(PoolClosedEvent(host)) def test_close_leaves_pool_unpaused(self): - # Needed until we implement PYTHON-2463. This test is related to - # test_threads.TestThreads.test_client_disconnect listener = CMAPListener() client = single_client(event_listeners=[listener]) client.admin.command("ping") pool = get_pool(client) client.close() - self.assertEqual(1, listener.event_count(PoolClearedEvent)) - self.assertEqual(PoolState.READY, pool.state) - # Checking out a connection should succeed - with pool.checkout(): - pass + self.assertEqual(1, listener.event_count(PoolClosedEvent)) + self.assertEqual(PoolState.CLOSED, pool.state) + # Checking out a connection should fail + with self.assertRaises(_PoolClosedError): + with pool.checkout(): + pass def create_test(scenario_def, test, name): diff --git a/test/test_cursor.py b/test/test_cursor.py index 7b10c2cac6..a54e025f55 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -23,6 +23,7 @@ import sys import threading import time +from typing import Any import pymongo @@ -354,7 +355,7 @@ def test_hint(self): db.test.find({"num": 17, "foo": 17}).hint([("foo", ASCENDING)]).explain, ) - spec = 
[("num", DESCENDING)] + spec: list[Any] = [("num", DESCENDING)] _ = db.test.create_index(spec) first = next(db.test.find()) @@ -729,7 +730,7 @@ def test_sort(self): random.shuffle(shuffled) db.test.drop() - for (a, b) in shuffled: + for a, b in shuffled: db.test.insert_one({"a": a, "b": b}) result = [ @@ -911,7 +912,8 @@ def test_clone(self): # Ensure hints are cloned as the correct type cursor = self.db.test.find().hint([("z", 1), ("a", 1)]) cursor2 = copy.deepcopy(cursor) - self.assertTrue(isinstance(cursor2._Cursor__hint, SON)) + # Internal types are now dict rather than SON by default + self.assertTrue(isinstance(cursor2._Cursor__hint, dict)) self.assertEqual(cursor._Cursor__hint, cursor2._Cursor__hint) def test_clone_empty(self): @@ -1084,8 +1086,12 @@ def test_concurrent_close(self): def iterate_cursor(): while cursor.alive: - for _doc in cursor: - pass + try: + for _doc in cursor: + pass + except OperationFailure as e: + if e.code != 237: # CursorKilled error code + raise t = threading.Thread(target=iterate_cursor) t.start() diff --git a/test/test_custom_types.py b/test/test_custom_types.py index da4bf03344..aa4b8b0a7d 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -356,7 +356,6 @@ def test_type_checks(self): class TestBSONCustomTypeEncoderAndFallbackEncoderTandem(unittest.TestCase): - TypeA: Any TypeB: Any fallback_encoder_A2B: Any diff --git a/test/test_database.py b/test/test_database.py index b141bb35fb..71383d5c63 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -204,7 +204,7 @@ def test_list_collection_names_filter(self): db.capped.insert_one({}) db.non_capped.insert_one({}) self.addCleanup(client.drop_database, db.name) - filter: Union[None, dict] + filter: Union[None, Mapping[str, Any]] # Should not send nameOnly. 
for filter in ({"options.capped": True}, {"options.capped": True, "name": "capped"}): listener.reset() @@ -375,13 +375,16 @@ def test_validate_collection(self): self.assertTrue(db.validate_collection(db.test, True, True)) @client_context.require_version_min(4, 3, 3) + @client_context.require_no_standalone def test_validate_collection_background(self): - db = self.client.pymongo_test + db = self.client.pymongo_test.with_options(write_concern=WriteConcern(w="majority")) db.test.insert_one({"dummy": "object"}) coll = db.test self.assertTrue(db.validate_collection(coll, background=False)) # The inMemory storage engine does not support background=True. if client_context.storage_engine != "inMemory": + # background=True requires the collection exist in a checkpoint. + self.client.admin.command("fsync") self.assertTrue(db.validate_collection(coll, background=True)) self.assertTrue(db.validate_collection(coll, scandata=True, background=True)) # The server does not support background=True with full=True. 
@@ -428,6 +431,7 @@ def test_cursor_command(self): def test_cursor_command_invalid(self): self.assertRaises(InvalidOperation, self.db.cursor_command, "usersInfo", "test") + @client_context.require_no_fips def test_password_digest(self): self.assertRaises(TypeError, auth._password_digest, 5) self.assertRaises(TypeError, auth._password_digest, True) @@ -457,7 +461,7 @@ def test_id_ordering(self): ) cursor = db.test.find() for x in cursor: - for (k, _v) in x.items(): + for k, _v in x.items(): self.assertEqual(k, "_id") break @@ -470,7 +474,7 @@ def test_deref(self): self.assertRaises(TypeError, db.dereference, None) self.assertEqual(None, db.dereference(DBRef("test", ObjectId()))) - obj = {"x": True} + obj: dict[str, Any] = {"x": True} key = db.test.insert_one(obj).inserted_id self.assertEqual(obj, db.dereference(DBRef("test", key))) self.assertEqual(obj, db.dereference(DBRef("test", key, "pymongo_test"))) diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 7053f20e1b..72b0f8a024 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -16,6 +16,7 @@ from __future__ import annotations import os +import socketserver import sys import threading @@ -27,6 +28,7 @@ from test.utils import ( CMAPListener, HeartbeatEventListener, + HeartbeatEventsListListener, assertion_context, client_context, get_pool, @@ -38,7 +40,7 @@ from unittest.mock import patch from bson import Timestamp, json_util -from pymongo import common, monitoring +from pymongo import MongoClient, common, monitoring from pymongo.errors import ( AutoReconnect, ConfigurationError, @@ -48,6 +50,7 @@ ) from pymongo.hello import Hello, HelloCompat from pymongo.helpers import _check_command_response, _check_write_command_response +from pymongo.monitoring import ServerHeartbeatFailedEvent, ServerHeartbeatStartedEvent from pymongo.server_description import SERVER_TYPE, ServerDescription from pymongo.settings import TopologySettings 
from pymongo.topology import Topology, _ErrorContext @@ -396,6 +399,48 @@ def assert_rtt_connection_is_disabled(self, client): self.assertIsNone(monitor._rtt_monitor._executor._thread) +class MockTCPHandler(socketserver.BaseRequestHandler): + def handle(self): + self.server.events.append("client connected") + if self.request.recv(1024).strip(): + self.server.events.append("client hello received") + self.request.close() + + +class TCPServer(socketserver.TCPServer): + allow_reuse_address = True + + def handle_request_and_shutdown(self): + self.handle_request() + self.server_close() + + +class TestHeartbeatStartOrdering(unittest.TestCase): + def test_heartbeat_start_ordering(self): + events = [] + listener = HeartbeatEventsListListener(events) + server = TCPServer(("localhost", 9999), MockTCPHandler) + server.events = events + server_thread = threading.Thread(target=server.handle_request_and_shutdown) + server_thread.start() + _c = MongoClient( + "mongodb://localhost:9999", serverSelectionTimeoutMS=500, event_listeners=(listener,) + ) + server_thread.join() + listener.wait_for_event(ServerHeartbeatStartedEvent, 1) + listener.wait_for_event(ServerHeartbeatFailedEvent, 1) + + self.assertEqual( + events, + [ + "serverHeartbeatStartedEvent", + "client connected", + "client hello received", + "serverHeartbeatFailedEvent", + ], + ) + + # Generate unified tests. 
globals().update(generate_test_classes(os.path.join(SDAM_PATH, "unified"), module=__name__)) diff --git a/test/test_dns.py b/test/test_dns.py index 0fe57a4fe7..9a78e451d7 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -28,7 +28,6 @@ from pymongo.common import validate_read_preference_tags from pymongo.errors import ConfigurationError from pymongo.mongo_client import MongoClient -from pymongo.srv_resolver import _HAVE_DNSPYTHON from pymongo.uri_parser import parse_uri, split_hosts @@ -65,8 +64,6 @@ def setUp(self): def create_test(test_case): def run_test(self): - if not _HAVE_DNSPYTHON: - raise unittest.SkipTest("DNS tests require the dnspython module") uri = test_case["uri"] seeds = test_case.get("seeds") num_seeds = test_case.get("numSeeds", len(seeds or [])) @@ -132,6 +129,8 @@ def run_test(self): wait_until( lambda: num_hosts == len(client.nodes), "wait to connect to num_hosts" ) + if test_case.get("ping", True): + client.admin.command("ping") # XXX: we should block until SRV poller runs at least once # and re-run these assertions. 
else: @@ -159,7 +158,6 @@ def create_tests(cls): class TestParsingErrors(unittest.TestCase): - @unittest.skipUnless(_HAVE_DNSPYTHON, "DNS tests require the dnspython module") def test_invalid_host(self): self.assertRaisesRegex( ConfigurationError, @@ -188,7 +186,6 @@ def test_invalid_host(self): class TestCaseInsensitive(IntegrationTest): - @unittest.skipUnless(_HAVE_DNSPYTHON, "DNS tests require the dnspython module") def test_connect_case_insensitive(self): client = MongoClient("mongodb+srv://TEST1.TEST.BUILD.10GEN.cc/") self.addCleanup(client.close) diff --git a/test/test_encryption.py b/test/test_encryption.py index 2ffb6d4935..2a60b72957 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -26,10 +26,12 @@ import textwrap import traceback import uuid +import warnings from threading import Thread from typing import Any, Dict, Mapping from pymongo.collection import Collection +from pymongo.daemon import _spawn_daemon sys.path[0:0] = [""] @@ -347,7 +349,9 @@ def test_fork(self): self.addCleanup(client.close) def target(): - client.admin.command("ping") + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + client.admin.command("ping") with self.fork(target): target() @@ -461,6 +465,14 @@ def test_encrypt_decrypt(self): ) self.assertEqual(encrypted_ssn, encrypted_ssn2) + # Test encryption via UUID + encrypted_ssn3 = client_encryption.encrypt( + doc["ssn"], + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=key_id.as_uuid(), + ) + self.assertEqual(encrypted_ssn, encrypted_ssn3) + # Test decryption. 
decrypted_ssn = client_encryption.decrypt(encrypted_ssn) self.assertEqual(decrypted_ssn, doc["ssn"]) @@ -479,9 +491,6 @@ def test_validation(self): msg = "key_id must be a bson.binary.Binary with subtype 4" algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic - uid = uuid.uuid4() - with self.assertRaisesRegex(TypeError, msg): - client_encryption.encrypt("str", algo, key_id=uid) # type: ignore[arg-type] with self.assertRaisesRegex(TypeError, msg): client_encryption.encrypt("str", algo, key_id=Binary(b"123")) @@ -503,7 +512,10 @@ def test_bson_errors(self): def test_codec_options(self): with self.assertRaisesRegex(TypeError, "codec_options must be"): ClientEncryption( - KMS_PROVIDERS, "keyvault.datakeys", client_context.client, None # type: ignore[arg-type] + KMS_PROVIDERS, + "keyvault.datakeys", + client_context.client, + None, # type: ignore[arg-type] ) opts = CodecOptions(uuid_representation=UuidRepresentation.JAVA_LEGACY) @@ -1072,7 +1084,9 @@ def _test_corpus(self, opts): try: encrypted_val = client_encryption.encrypt( - value["value"], algo, **kwargs # type: ignore[arg-type] + value["value"], + algo, + **kwargs, # type: ignore[arg-type] ) if not value["allowed"]: self.fail(f"encrypt should have failed: {key!r}: {value!r}") @@ -2002,7 +2016,7 @@ def test_invalid_kms_certificate_expired(self): key = { "region": "us-east-1", "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - "endpoint": "mongodb://127.0.0.1:8000", + "endpoint": "mongodb://127.0.0.1:9000", } # Some examples: # certificate verify failed: certificate has expired (_ssl.c:1129) @@ -2014,13 +2028,14 @@ def test_invalid_hostname_in_kms_certificate(self): key = { "region": "us-east-1", "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - "endpoint": "mongodb://127.0.0.1:8001", + "endpoint": "mongodb://127.0.0.1:9001", } # Some examples: # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. 
(_ssl.c:1129)" # hostname '127.0.0.1' doesn't match 'wronghost.com' + # 127.0.0.1:9001: ('Certificate does not contain any `subjectAltName`s.',) with self.assertRaisesRegex( - EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch" + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate" ): self.client_encrypted.create_data_key("aws", master_key=key) @@ -2032,8 +2047,8 @@ def setUp(self): super().setUp() # 1, create client with only tlsCAFile. providers: dict = copy.deepcopy(ALL_KMS_PROVIDERS) - providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:8002" - providers["gcp"]["endpoint"] = "127.0.0.1:8002" + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9002" + providers["gcp"]["endpoint"] = "127.0.0.1:9002" kms_tls_opts_ca_only = { "aws": {"tlsCAFile": CA_PEM}, "azure": {"tlsCAFile": CA_PEM}, @@ -2054,18 +2069,18 @@ def setUp(self): self.addCleanup(self.client_encryption_with_tls.close) # 3, update endpoints to expired host. providers: dict = copy.deepcopy(providers) - providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:8000" - providers["gcp"]["endpoint"] = "127.0.0.1:8000" - providers["kmip"]["endpoint"] = "127.0.0.1:8000" + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9000" + providers["gcp"]["endpoint"] = "127.0.0.1:9000" + providers["kmip"]["endpoint"] = "127.0.0.1:9000" self.client_encryption_expired = ClientEncryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only ) self.addCleanup(self.client_encryption_expired.close) # 3, update endpoints to invalid host. 
providers: dict = copy.deepcopy(providers) - providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:8001" - providers["gcp"]["endpoint"] = "127.0.0.1:8001" - providers["kmip"]["endpoint"] = "127.0.0.1:8001" + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9001" + providers["gcp"]["endpoint"] = "127.0.0.1:9001" + providers["kmip"]["endpoint"] = "127.0.0.1:9001" self.client_encryption_invalid_hostname = ClientEncryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only ) @@ -2084,12 +2099,38 @@ def setUp(self): # [WinError 10054] An existing connection was forcibly closed by the remote host if sys.platform == "win32": self.cert_error += "|forcibly closed" + # 4, Test named KMS providers. + providers = { + "aws:no_client_cert": AWS_CREDS, + "azure:no_client_cert": {"identityPlatformEndpoint": "127.0.0.1:9002", **AZURE_CREDS}, + "gcp:no_client_cert": {"endpoint": "127.0.0.1:9002", **GCP_CREDS}, + "kmip:no_client_cert": KMIP_CREDS, + "aws:with_tls": AWS_CREDS, + "azure:with_tls": {"identityPlatformEndpoint": "127.0.0.1:9002", **AZURE_CREDS}, + "gcp:with_tls": {"endpoint": "127.0.0.1:9002", **GCP_CREDS}, + "kmip:with_tls": KMIP_CREDS, + } + no_cert = {"tlsCAFile": CA_PEM} + with_cert = {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM} + kms_tls_opts_4 = { + "aws:no_client_cert": no_cert, + "azure:no_client_cert": no_cert, + "gcp:no_client_cert": no_cert, + "kmip:no_client_cert": no_cert, + "aws:with_tls": with_cert, + "azure:with_tls": with_cert, + "gcp:with_tls": with_cert, + "kmip:with_tls": with_cert, + } + self.client_encryption_with_names = ClientEncryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_4 + ) def test_01_aws(self): key = { "region": "us-east-1", "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - "endpoint": "127.0.0.1:8002", + "endpoint": "127.0.0.1:9002", } with self.assertRaisesRegex(EncryptionError, 
self.cert_error): self.client_encryption_no_client_cert.create_data_key("aws", key) @@ -2099,15 +2140,16 @@ def test_01_aws(self): # Some examples: # certificate verify failed: certificate has expired (_ssl.c:1129) # amazon1-2018 Python 3.6: certificate verify failed (_ssl.c:852) - key["endpoint"] = "127.0.0.1:8000" + key["endpoint"] = "127.0.0.1:9000" with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): self.client_encryption_expired.create_data_key("aws", key) # Some examples: # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. (_ssl.c:1129)" # hostname '127.0.0.1' doesn't match 'wronghost.com' - key["endpoint"] = "127.0.0.1:8001" + # 127.0.0.1:9001: ('Certificate does not contain any `subjectAltName`s.',) + key["endpoint"] = "127.0.0.1:9001" with self.assertRaisesRegex( - EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch" + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate" ): self.client_encryption_invalid_hostname.create_data_key("aws", key) @@ -2124,7 +2166,7 @@ def test_02_azure(self): self.client_encryption_expired.create_data_key("azure", key) # Invalid cert hostname error. with self.assertRaisesRegex( - EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch" + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate" ): self.client_encryption_invalid_hostname.create_data_key("azure", key) @@ -2141,7 +2183,7 @@ def test_03_gcp(self): self.client_encryption_expired.create_data_key("gcp", key) # Invalid cert hostname error. with self.assertRaisesRegex( - EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch" + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate" ): self.client_encryption_invalid_hostname.create_data_key("gcp", key) @@ -2155,7 +2197,7 @@ def test_04_kmip(self): self.client_encryption_expired.create_data_key("kmip") # Invalid cert hostname error. 
with self.assertRaisesRegex( - EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch" + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate" ): self.client_encryption_invalid_hostname.create_data_key("kmip") @@ -2171,6 +2213,43 @@ def test_05_tlsDisableOCSPEndpointCheck_is_permitted(self): raise self.skipTest("OCSP not enabled") self.assertFalse(ctx.check_ocsp_endpoint) + def test_06_named_kms_providers_apply_tls_options_aws(self): + key = { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "127.0.0.1:9002", + } + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_with_names.create_data_key("aws:no_client_cert", key) + # "parse error" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "parse error"): + self.client_encryption_with_names.create_data_key("aws:with_tls", key) + + def test_06_named_kms_providers_apply_tls_options_azure(self): + key = {"keyVaultEndpoint": "doesnotexist.local", "keyName": "foo"} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_with_names.create_data_key("azure:no_client_cert", key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + self.client_encryption_with_names.create_data_key("azure:with_tls", key) + + def test_06_named_kms_providers_apply_tls_options_gcp(self): + key = {"projectId": "foo", "location": "bar", "keyRing": "baz", "keyName": "foo"} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_with_names.create_data_key("gcp:no_client_cert", key) + # "HTTP status=404" here means that the TLS handshake succeeded. 
+ with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + self.client_encryption_with_names.create_data_key("gcp:with_tls", key) + + def test_06_named_kms_providers_apply_tls_options_kmip(self): + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_with_names.create_data_key("kmip:no_client_cert") + self.client_encryption_with_names.create_data_key("kmip:with_tls") + # https://github.com/mongodb/specifications/blob/50e26fe/source/client-side-encryption/tests/README.rst#unique-index-on-keyaltnames class TestUniqueIndexOnKeyAltNamesProse(EncryptionIntegrationTest): @@ -2316,7 +2395,6 @@ def test_05_roundtrip_encrypted_unindexed(self): # https://github.com/mongodb/specifications/blob/072601/source/client-side-encryption/tests/README.rst#rewrap class TestRewrapWithSeparateClientEncryption(EncryptionIntegrationTest): - MASTER_KEYS: Mapping[str, Mapping[str, Any]] = { "aws": { "region": "us-east-1", @@ -2527,6 +2605,7 @@ def MongoClient(**kwargs): class TestRangeQueryProse(EncryptionIntegrationTest): @client_context.require_no_standalone @client_context.require_version_min(7, 0, -1) + @client_context.require_version_max(7, 9, 99) def setUp(self): super().setUp() self.key1_document = json_data("etc", "data", "keys", "key1-document.json") @@ -2548,10 +2627,12 @@ def setUp(self): self.db = self.encrypted_client.db self.addCleanup(self.encrypted_client.close) - def run_expression_find(self, name, expression, expected_elems, range_opts, use_expr=False): + def run_expression_find( + self, name, expression, expected_elems, range_opts, use_expr=False, key_id=None + ): find_payload = self.client_encryption.encrypt_expression( expression=expression, - key_id=self.key1_id, + key_id=key_id or self.key1_id, algorithm=Algorithm.RANGEPREVIEW, query_type=QueryType.RANGEPREVIEW, contention_factor=0, @@ -2593,16 +2674,20 @@ def encrypt_and_cast(i): self.assertEqual(self.client_encryption.decrypt(insert_payload), 
cast_func(6)) # Case 2. + expression = { + "$and": [ + {f"encrypted{name}": {"$gte": cast_func(6)}}, + {f"encrypted{name}": {"$lte": cast_func(200)}}, + ] + } + self.run_expression_find(name, expression, [cast_func(i) for i in [6, 30, 200]], range_opts) + # Case 2, with UUID key_id self.run_expression_find( name, - { - "$and": [ - {f"encrypted{name}": {"$gte": cast_func(6)}}, - {f"encrypted{name}": {"$lte": cast_func(200)}}, - ] - }, + expression, [cast_func(i) for i in [6, 30, 200]], range_opts, + key_id=self.key1_id.as_uuid(), ) # Case 3. @@ -2928,5 +3013,59 @@ def test_collection_name_collision(self): self.assertIsInstance(exc.exception.encrypted_fields["fields"][0]["keyId"], Binary) +def start_mongocryptd(port) -> None: + args = ["mongocryptd", f"--port={port}", "--idleShutdownTimeoutSecs=60"] + _spawn_daemon(args) + + +class TestNoSessionsSupport(EncryptionIntegrationTest): + mongocryptd_client: MongoClient + MONGOCRYPTD_PORT = 27020 + + @classmethod + @unittest.skipIf(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is installed") + def setUpClass(cls): + super().setUpClass() + start_mongocryptd(cls.MONGOCRYPTD_PORT) + + @classmethod + def tearDownClass(cls): + super().tearDownClass() + + def setUp(self) -> None: + self.listener = OvertCommandListener() + self.mongocryptd_client = MongoClient( + f"mongodb://localhost:{self.MONGOCRYPTD_PORT}", event_listeners=[self.listener] + ) + self.addCleanup(self.mongocryptd_client.close) + + hello = self.mongocryptd_client.db.command("hello") + self.assertNotIn("logicalSessionTimeoutMinutes", hello) + + def test_implicit_session_ignored_when_unsupported(self): + self.listener.reset() + with self.assertRaises(OperationFailure): + self.mongocryptd_client.db.test.find_one() + + self.assertNotIn("lsid", self.listener.started_events[0].command) + + with self.assertRaises(OperationFailure): + self.mongocryptd_client.db.test.insert_one({"x": 1}) + + self.assertNotIn("lsid", self.listener.started_events[1].command) + + def 
test_explicit_session_errors_when_unsupported(self): + self.listener.reset() + with self.mongocryptd_client.start_session() as s: + with self.assertRaisesRegex( + ConfigurationError, r"Sessions are not supported by this MongoDB deployment" + ): + self.mongocryptd_client.db.test.find_one(session=s) + with self.assertRaisesRegex( + ConfigurationError, r"Sessions are not supported by this MongoDB deployment" + ): + self.mongocryptd_client.db.test.insert_one({"x": 1}, session=s) + + if __name__ == "__main__": unittest.main() diff --git a/test/test_fork.py b/test/test_fork.py index 7b19e4cd8d..d9ac3d261d 100644 --- a/test/test_fork.py +++ b/test/test_fork.py @@ -18,6 +18,7 @@ import os import sys import unittest +import warnings from multiprocessing import Pipe sys.path[0:0] = [""] @@ -43,7 +44,9 @@ def test_lock_client(self): with self.client._MongoClient__lock: def target(): - self.client.admin.command("ping") + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + self.client.admin.command("ping") with self.fork(target): pass @@ -72,7 +75,11 @@ def test_topology_reset(self): parent_cursor_exc = self.client._kill_cursors_executor def target(): - self.client.admin.command("ping") + # Catch the fork warning and send to the parent for assertion. 
+ with warnings.catch_warnings(record=True) as ctx: + warnings.simplefilter("always") + self.client.admin.command("ping") + child_conn.send(str(ctx[0])) child_conn.send(self.client._topology._pid) child_conn.send( ( @@ -83,6 +90,8 @@ def target(): with self.fork(target): self.assertEqual(self.client._topology._pid, init_id) + fork_warning = parent_conn.recv() + self.assertIn("MongoClient opened before fork", fork_warning) child_id = parent_conn.recv() self.assertNotEqual(child_id, init_id) passed, msg = parent_conn.recv() diff --git a/test/test_gridfs.py b/test/test_gridfs.py index f94736708e..88fccd6544 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -21,6 +21,7 @@ import threading import time from io import BytesIO +from unittest.mock import patch sys.path[0:0] = [""] @@ -30,7 +31,7 @@ import gridfs from bson.binary import Binary from gridfs.errors import CorruptGridFile, FileExists, NoFile -from gridfs.grid_file import GridOutCursor +from gridfs.grid_file import DEFAULT_CHUNK_SIZE, GridOutCursor from pymongo.database import Database from pymongo.errors import ( ConfigurationError, @@ -344,8 +345,18 @@ def test_file_exists(self): one.write(b"some content") one.close() + # Attempt to upload a file with more chunks to the same _id. + with patch("gridfs.grid_file._UPLOAD_BUFFER_SIZE", DEFAULT_CHUNK_SIZE): + two = self.fs.new_file(_id=123) + self.assertRaises(FileExists, two.write, b"x" * DEFAULT_CHUNK_SIZE * 3) + # Original file is still readable (no extra chunks were uploaded). + self.assertEqual(self.fs.get(123).read(), b"some content") + two = self.fs.new_file(_id=123) - self.assertRaises(FileExists, two.write, b"x" * 262146) + two.write(b"some content") + self.assertRaises(FileExists, two.close) + # Original file is still readable. 
+ self.assertEqual(self.fs.get(123).read(), b"some content") def test_exists(self): oid = self.fs.put(b"hello") diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index 53e5cad54e..f1e7800ce3 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -18,9 +18,14 @@ import datetime import itertools +import sys import threading import time from io import BytesIO +from unittest.mock import patch + +sys.path[0:0] = [""] + from test import IntegrationTest, client_context, unittest from test.utils import joinall, one, rs_client, rs_or_single_client, single_client @@ -34,6 +39,7 @@ ConfigurationError, NotPrimaryError, ServerSelectionTimeoutError, + WriteConcernError, ) from pymongo.mongo_client import MongoClient from pymongo.read_preferences import ReadPreference @@ -276,6 +282,39 @@ def test_upload_from_stream_with_id(self): ) self.assertEqual(b"custom id", self.fs.open_download_stream(oid).read()) + @patch("gridfs.grid_file._UPLOAD_BUFFER_CHUNKS", 3) + @client_context.require_failCommand_fail_point + def test_upload_bulk_write_error(self): + # Test BulkWriteError from insert_many is converted to an insert_one style error. + expected_wce = { + "code": 100, + "codeName": "UnsatisfiableWriteConcern", + "errmsg": "Not enough data-bearing nodes", + } + cause_wce = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["insert"], "writeConcernError": expected_wce}, + } + gin = self.fs.open_upload_stream("test_file", chunk_size_bytes=1) + with self.fail_point(cause_wce): + # Assert we raise WriteConcernError, not BulkWriteError. + with self.assertRaises(WriteConcernError): + gin.write(b"hello world") + # 3 chunks were uploaded. 
+ self.assertEqual(3, self.db.fs.chunks.count_documents({"files_id": gin._id})) + gin.abort() + + @patch("gridfs.grid_file._UPLOAD_BUFFER_CHUNKS", 10) + def test_upload_batching(self): + with self.fs.open_upload_stream("test_file", chunk_size_bytes=1) as gin: + gin.write(b"s" * (10 - 1)) + # No chunks were uploaded yet. + self.assertEqual(0, self.db.fs.chunks.count_documents({"files_id": gin._id})) + gin.write(b"s") + # All chunks were uploaded since we hit the _UPLOAD_BUFFER_CHUNKS limit. + self.assertEqual(10, self.db.fs.chunks.count_documents({"files_id": gin._id})) + def test_open_upload_stream(self): gin = self.fs.open_upload_stream("from_stream") gin.write(b"from stream") @@ -362,6 +401,7 @@ def test_rename(self): self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "first_name") self.assertEqual(b"testing", self.fs.open_download_stream_by_name("second_name").read()) + @patch("gridfs.grid_file._UPLOAD_BUFFER_SIZE", 5) def test_abort(self): gin = self.fs.open_upload_stream("test_filename", chunk_size_bytes=5) gin.write(b"test1") diff --git a/test/test_index_management.py b/test/test_index_management.py index 9db9a22aea..5b6653dcba 100644 --- a/test/test_index_management.py +++ b/test/test_index_management.py @@ -25,10 +25,13 @@ from test import IntegrationTest, unittest from test.unified_format import generate_test_classes +from test.utils import AllowListEventListener, EventListener from pymongo import MongoClient from pymongo.errors import OperationFailure from pymongo.operations import SearchIndexModel +from pymongo.read_concern import ReadConcern +from pymongo.write_concern import WriteConcern _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "index_management") @@ -39,7 +42,8 @@ class TestCreateSearchIndex(IntegrationTest): def test_inputs(self): if not os.environ.get("TEST_INDEX_MANAGEMENT"): raise unittest.SkipTest("Skipping index management tests") - client = MongoClient() + listener = 
AllowListEventListener("createSearchIndexes") + client = MongoClient(event_listeners=[listener]) self.addCleanup(client.close) coll = client.test.test coll.drop() @@ -55,8 +59,25 @@ def test_inputs(self): with self.assertRaises(OperationFailure): coll.create_search_index(model_kwargs) + listener.reset() + with self.assertRaises(OperationFailure): + coll.create_search_index({"definition": definition, "arbitraryOption": 1}) + self.assertEqual( + {"definition": definition, "arbitraryOption": 1}, + listener.events[0].command["indexes"][0], + ) + + listener.reset() + with self.assertRaises(OperationFailure): + coll.create_search_index({"definition": definition, "type": "search"}) + self.assertEqual( + {"definition": definition, "type": "search"}, listener.events[0].command["indexes"][0] + ) + + +class SearchIndexIntegrationBase(unittest.TestCase): + db_name = "test_search_index_base" -class TestSearchIndexProse(unittest.TestCase): @classmethod def setUpClass(cls) -> None: super().setUpClass() @@ -65,9 +86,12 @@ def setUpClass(cls) -> None: url = os.environ.get("MONGODB_URI") username = os.environ["DB_USER"] password = os.environ["DB_PASSWORD"] - cls.client = MongoClient(url, username=username, password=password) + cls.listener = listener = EventListener() + cls.client = MongoClient( + url, username=username, password=password, event_listeners=[listener] + ) cls.client.drop_database(_NAME) - cls.db = cls.client.test_search_index_prose + cls.db = cls.client[cls.db_name] @classmethod def tearDownClass(cls): @@ -87,6 +111,34 @@ def wait_for_ready(self, coll, name=_NAME, predicate=None): break time.sleep(5) + +class TestSearchIndexIntegration(SearchIndexIntegrationBase): + db_name = "test_search_index" + + def test_comment_field(self): + # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). 
+ coll0 = self.db[f"col{uuid.uuid4()}"] + coll0.insert_one({}) + + # Create a new search index on ``coll0`` that implicitly passes its type. + search_definition = {"mappings": {"dynamic": False}} + self.listener.reset() + implicit_search_resp = coll0.create_search_index( + model={"name": _NAME + "-implicit", "definition": search_definition}, comment="foo" + ) + event = self.listener.events[0] + self.assertEqual(event.command["comment"], "foo") + + # Get the index definition. + self.listener.reset() + coll0.list_search_indexes(name=implicit_search_resp, comment="foo").next() + event = self.listener.events[0] + self.assertEqual(event.command["comment"], "foo") + + +class TestSearchIndexProse(SearchIndexIntegrationBase): + db_name = "test_search_index_prose" + def test_case_1(self): """Driver can successfully create and list search indexes.""" @@ -217,24 +269,104 @@ def test_case_5(self): # Run a ``dropSearchIndex`` command and assert that no error is thrown. coll0.drop_search_index("foo") + def test_case_6(self): + """Driver can successfully create and list search indexes with non-default readConcern and writeConcern.""" + # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). + coll0 = self.db[f"col{uuid.uuid4()}"] + coll0.insert_one({}) -if os.environ.get("TEST_INDEX_MANAGEMENT"): - globals().update( - generate_test_classes( - _TEST_PATH, - module=__name__, + # Apply a write concern ``WriteConcern(w=1)`` and a read concern with ``ReadConcern(level="majority")`` to ``coll0``. + coll0 = coll0.with_options( + write_concern=WriteConcern(w="1"), read_concern=ReadConcern(level="majority") ) - ) -else: - class TestIndexManagementUnifiedTests(unittest.TestCase): - @classmethod - def setUpClass(cls) -> None: - raise unittest.SkipTest("Skipping index management pending PYTHON-3951") + # Create a new search index on ``coll0`` with the ``createSearchIndex`` helper. 
+ name = "test-search-index-case6" + model = {"name": name, "definition": {"mappings": {"dynamic": False}}} + resp = coll0.create_search_index(model) + + # Assert that the command returns the name of the index: ``"test-search-index-case6"``. + self.assertEqual(resp, name) - def test_placeholder(self): - pass + # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied and store the value in a variable ``index``: + # - An index with the ``name`` of ``test-search-index-case6`` is present and the index has a field ``queryable`` with a value of ``true``. + index = self.wait_for_ready(coll0, name) + # Assert that ``index`` has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': false } }`` + self.assertIn("latestDefinition", index) + self.assertEqual(index["latestDefinition"], model["definition"]) + + def test_case_7(self): + """Driver handles index types.""" + + # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). + coll0 = self.db[f"col{uuid.uuid4()}"] + coll0.insert_one({}) + + # Use these search and vector search definitions for indexes. + search_definition = {"mappings": {"dynamic": False}} + vector_search_definition = { + "fields": [ + { + "type": "vector", + "path": "plot_embedding", + "numDimensions": 1536, + "similarity": "euclidean", + }, + ] + } + + # Create a new search index on ``coll0`` that implicitly passes its type. + implicit_search_resp = coll0.create_search_index( + model={"name": _NAME + "-implicit", "definition": search_definition} + ) + + # Get the index definition. + resp = coll0.list_search_indexes(name=implicit_search_resp).next() + + # Assert that the index model contains the correct index type: ``"search"``. + self.assertEqual(resp["type"], "search") + + # Create a new search index on ``coll0`` that explicitly passes its type. 
+ explicit_search_resp = coll0.create_search_index( + model={"name": _NAME + "-explicit", "type": "search", "definition": search_definition} + ) + + # Get the index definition. + resp = coll0.list_search_indexes(name=explicit_search_resp).next() + + # Assert that the index model contains the correct index type: ``"search"``. + self.assertEqual(resp["type"], "search") + + # Create a new vector search index on ``coll0`` that explicitly passes its type. + explicit_vector_resp = coll0.create_search_index( + model={ + "name": _NAME + "-vector", + "type": "vectorSearch", + "definition": vector_search_definition, + } + ) + + # Get the index definition. + resp = coll0.list_search_indexes(name=explicit_vector_resp).next() + + # Assert that the index model contains the correct index type: ``"vectorSearch"``. + self.assertEqual(resp["type"], "vectorSearch") + + # Catch the error raised when trying to create a vector search index without specifying the type + with self.assertRaises(OperationFailure) as e: + coll0.create_search_index( + model={"name": _NAME + "-error", "definition": vector_search_definition} + ) + self.assertIn("Attribute mappings missing.", e.exception.details["errmsg"]) + + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) if __name__ == "__main__": unittest.main() diff --git a/test/test_json_util.py b/test/test_json_util.py index a35d736ebd..0f73a8efd9 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -20,7 +20,8 @@ import re import sys import uuid -from typing import Any, List, MutableMapping +from collections import OrderedDict +from typing import Any, List, MutableMapping, Tuple, Type from bson.codec_options import CodecOptions, DatetimeConversion @@ -40,9 +41,12 @@ from bson.code import Code from bson.datetime_ms import _max_datetime_ms from bson.dbref import DBRef +from bson.decimal128 import Decimal128 from bson.int64 import Int64 from bson.json_util import ( + CANONICAL_JSON_OPTIONS, 
LEGACY_JSON_OPTIONS, + RELAXED_JSON_OPTIONS, DatetimeRepresentation, JSONMode, JSONOptions, @@ -252,8 +256,8 @@ def test_datetime(self): def test_datetime_ms(self): # Test ISO8601 in-range - dat_min = {"x": DatetimeMS(0)} - dat_max = {"x": DatetimeMS(_max_datetime_ms())} + dat_min: dict[str, Any] = {"x": DatetimeMS(0)} + dat_max: dict[str, Any] = {"x": DatetimeMS(_max_datetime_ms())} opts = JSONOptions(datetime_representation=DatetimeRepresentation.ISO8601) self.assertEqual( @@ -450,7 +454,7 @@ def test_uuid_uuid_rep_unspecified(self): ) # Cannot directly encode native UUIDs with UNSPECIFIED. - doc = {"uuid": _uuid} + doc: dict[str, Any] = {"uuid": _uuid} with self.assertRaises(ValueError): json_util.dumps(doc, json_options=options) @@ -545,18 +549,73 @@ def test_numberlong(self): self.assertEqual(json_util.dumps({"weight": Int64(65535)}), '{"weight": 65535}') json_options = JSONOptions(strict_number_long=True, json_mode=JSONMode.LEGACY) self.assertEqual(json_util.dumps({"weight": Int64(65535)}, json_options=json_options), jsn) - - def test_loads_document_class(self): - # document_class dict should always work - self.assertEqual( - {"foo": "bar"}, - json_util.loads('{"foo": "bar"}', json_options=JSONOptions(document_class=dict)), - ) + # Ensure json_util.default converts Int64 to int in non-strict mode. 
+ converted = json_util.default(Int64(65535)) + self.assertEqual(converted, 65535) + self.assertNotIsInstance(converted, Int64) self.assertEqual( - SON([("foo", "bar"), ("b", 1)]), - json_util.loads('{"foo": "bar", "b": 1}', json_options=JSONOptions(document_class=SON)), + json_util.default(Int64(65535), json_options=json_options), {"$numberLong": "65535"} ) + def test_loads_document_class(self): + json_doc = '{"foo": "bar", "b": 1, "d": {"a": 1}}' + expected_doc = {"foo": "bar", "b": 1, "d": {"a": 1}} + for cls in (dict, SON, OrderedDict): + doc = json_util.loads(json_doc, json_options=JSONOptions(document_class=cls)) + self.assertEqual(doc, expected_doc) + self.assertIsInstance(doc, cls) + self.assertIsInstance(doc["d"], cls) + + def test_encode_subclass(self): + cases: list[Tuple[Type, Any]] = [ + (int, (1,)), + (int, (2 << 60,)), + (float, (1.1,)), + (Int64, (64,)), + (Int64, (2 << 60,)), + (str, ("str",)), + (bytes, (b"bytes",)), + (datetime.datetime, (2024, 1, 16)), + (DatetimeMS, (1,)), + (uuid.UUID, ("f47ac10b-58cc-4372-a567-0e02b2c3d479",)), + (Binary, (b"1", USER_DEFINED_SUBTYPE)), + (Code, ("code",)), + (DBRef, ("coll", ObjectId())), + (ObjectId, ("65a6dab5f98bc03906ee3597",)), + (MaxKey, ()), + (MinKey, ()), + (Regex, ("pat",)), + (Timestamp, (1, 1)), + (Decimal128, ("0.5",)), + ] + allopts = [ + CANONICAL_JSON_OPTIONS.with_options(uuid_representation=STANDARD), + RELAXED_JSON_OPTIONS.with_options(uuid_representation=STANDARD), + LEGACY_JSON_OPTIONS.with_options(uuid_representation=STANDARD), + ] + for cls, args in cases: + basic_obj = cls(*args) + my_cls = type(f"My{cls.__name__}", (cls,), {}) + my_obj = my_cls(*args) + for opts in allopts: + expected_json = json_util.dumps(basic_obj, json_options=opts) + self.assertEqual(json_util.dumps(my_obj, json_options=opts), expected_json) + + def test_encode_type_marker(self): + # Assert that a custom subclass can be JSON encoded based on the _type_marker attribute. 
+ class MyMaxKey: + _type_marker = 127 + + expected_json = json_util.dumps(MaxKey()) + self.assertEqual(json_util.dumps(MyMaxKey()), expected_json) + + # Test a class that inherits from two built in types + class MyBinary(Binary): + pass + + expected_json = json_util.dumps(Binary(b"bin", USER_DEFINED_SUBTYPE)) + self.assertEqual(json_util.dumps(MyBinary(b"bin", USER_DEFINED_SUBTYPE)), expected_json) + class TestJsonUtilRoundtrip(IntegrationTest): def test_cursor(self): diff --git a/test/test_logger.py b/test/test_logger.py new file mode 100644 index 0000000000..1dfa0724e5 --- /dev/null +++ b/test/test_logger.py @@ -0,0 +1,101 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import os +from test import unittest +from test.test_client import IntegrationTest +from test.utils import single_client +from unittest.mock import patch + +from bson import json_util +from pymongo.errors import OperationFailure +from pymongo.logger import _DEFAULT_DOCUMENT_LENGTH + + +# https://github.com/mongodb/specifications/tree/master/source/command-logging-and-monitoring/tests#prose-tests +class TestLogger(IntegrationTest): + def test_default_truncation_limit(self): + docs = [{"x": "y"} for _ in range(100)] + db = self.db + + with patch.dict("os.environ"): + os.environ.pop("MONGOB_LOG_MAX_DOCUMENT_LENGTH", None) + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + db.test.insert_many(docs) + + cmd_started_log = json_util.loads(cm.records[0].message) + self.assertEqual(len(cmd_started_log["command"]), _DEFAULT_DOCUMENT_LENGTH + 3) + + cmd_succeeded_log = json_util.loads(cm.records[1].message) + self.assertLessEqual(len(cmd_succeeded_log["reply"]), _DEFAULT_DOCUMENT_LENGTH + 3) + + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + list(db.test.find({})) + cmd_succeeded_log = json_util.loads(cm.records[1].message) + self.assertEqual(len(cmd_succeeded_log["reply"]), _DEFAULT_DOCUMENT_LENGTH + 3) + + def test_configured_truncation_limit(self): + cmd = {"hello": True} + db = self.db + with patch.dict("os.environ", {"MONGOB_LOG_MAX_DOCUMENT_LENGTH": "5"}): + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + db.command(cmd) + + cmd_started_log = json_util.loads(cm.records[0].message) + self.assertEqual(len(cmd_started_log["command"]), 5 + 3) + + cmd_succeeded_log = json_util.loads(cm.records[1].message) + self.assertLessEqual(len(cmd_succeeded_log["reply"]), 5 + 3) + with self.assertRaises(OperationFailure): + db.command({"notARealCommand": True}) + cmd_failed_log = json_util.loads(cm.records[-1].message) + self.assertEqual(len(cmd_failed_log["failure"]), 5 + 3) + + def 
test_truncation_multi_byte_codepoints(self): + document_lengths = ["20000", "20001", "20002"] + multi_byte_char_str_len = 50_000 + str_to_repeat = "界" + + multi_byte_char_str = "" + for i in range(multi_byte_char_str_len): + multi_byte_char_str += str_to_repeat + + for length in document_lengths: + with patch.dict("os.environ", {"MONGOB_LOG_MAX_DOCUMENT_LENGTH": length}): + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + self.db.test.insert_one({"x": multi_byte_char_str}) + cmd_started_log = json_util.loads(cm.records[0].message)["command"] + + cmd_started_log = cmd_started_log[:-3] + last_3_bytes = cmd_started_log.encode()[-3:].decode() + + self.assertEqual(last_3_bytes, str_to_repeat) + + def test_logging_without_listeners(self): + c = single_client() + self.assertEqual(len(c._event_listeners.event_listeners()), 0) + with self.assertLogs("pymongo.connection", level="DEBUG") as cm: + c.db.test.insert_one({"x": "1"}) + self.assertGreater(len(cm.records), 0) + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + c.db.test.insert_one({"x": "1"}) + self.assertGreater(len(cm.records), 0) + with self.assertLogs("pymongo.serverSelection", level="DEBUG") as cm: + c.db.test.insert_one({"x": "1"}) + self.assertGreater(len(cm.records), 0) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_max_staleness.py b/test/test_max_staleness.py index a87cbad587..1b0130f7d8 100644 --- a/test/test_max_staleness.py +++ b/test/test_max_staleness.py @@ -20,6 +20,8 @@ import time import warnings +from pymongo.operations import _Op + sys.path[0:0] = [""] from test import client_context, unittest @@ -113,7 +115,7 @@ def test_last_write_date(self): client.pymongo_test.test.insert_one({}) # Wait for the server description to be updated. 
time.sleep(1) - server = client._topology.select_server(writable_server_selector) + server = client._topology.select_server(writable_server_selector, _Op.TEST) first = server.description.last_write_date self.assertTrue(first) # The first last_write_date may correspond to a internal server write, @@ -122,7 +124,7 @@ def test_last_write_date(self): client.pymongo_test.test.insert_one({}) # Wait for the server description to be updated. time.sleep(1) - server = client._topology.select_server(writable_server_selector) + server = client._topology.select_server(writable_server_selector, _Op.TEST) second = server.description.last_write_date assert first is not None diff --git a/test/test_mongos_load_balancing.py b/test/test_mongos_load_balancing.py index a1e2438840..f39a1cb03f 100644 --- a/test/test_mongos_load_balancing.py +++ b/test/test_mongos_load_balancing.py @@ -18,6 +18,8 @@ import sys import threading +from pymongo.operations import _Op + sys.path[0:0] = [""] from test import MockClientTest, client_context, unittest @@ -60,7 +62,8 @@ def do_simple_op(client, nthreads): def writable_addresses(topology): return { - server.description.address for server in topology.select_servers(writable_server_selector) + server.description.address + for server in topology.select_servers(writable_server_selector, _Op.TEST) } diff --git a/test/test_monitor.py b/test/test_monitor.py index 0495a8cbc7..92bcdc49ad 100644 --- a/test/test_monitor.py +++ b/test/test_monitor.py @@ -16,6 +16,7 @@ from __future__ import annotations import gc +import subprocess import sys from functools import partial @@ -79,6 +80,17 @@ def test_cleanup_executors_on_client_close(self): for executor in executors: wait_until(lambda: executor._stopped, f"closed executor: {executor._name}", timeout=5) + def test_no_thread_start_runtime_err_on_shutdown(self): + """Test we silence noisy runtime errors fired when the MongoClient spawns a new thread + on process shutdown.""" + command = [sys.executable, "-c", "from 
pymongo import MongoClient; c = MongoClient()"] + completed_process: subprocess.CompletedProcess = subprocess.run( + command, capture_output=True + ) + + self.assertFalse(completed_process.stderr) + self.assertFalse(completed_process.stdout) + if __name__ == "__main__": unittest.main() diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 6880a30dc7..868078d5c8 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -1085,7 +1085,7 @@ def test_sensitive_commands(self): self.listener.reset() cmd = SON([("getnonce", 1)]) - listeners.publish_command_start(cmd, "pymongo_test", 12345, self.client.address) # type: ignore[arg-type] + listeners.publish_command_start(cmd, "pymongo_test", 12345, self.client.address, None) # type: ignore[arg-type] delta = datetime.timedelta(milliseconds=100) listeners.publish_command_success( delta, @@ -1093,6 +1093,7 @@ def test_sensitive_commands(self): "getnonce", 12345, self.client.address, # type: ignore[arg-type] + None, database_name="pymongo_test", ) started = self.listener.started_events[0] @@ -1161,7 +1162,7 @@ def test_command_event_repr(self): self.assertEqual( repr(event), "", + "command: 'ping', operation_id: 2, service_id: None, server_connection_id: None>", ) delta = datetime.timedelta(milliseconds=100) event = monitoring.CommandSucceededEvent( @@ -1171,7 +1172,7 @@ def test_command_event_repr(self): repr(event), "", + "service_id: None, server_connection_id: None>", ) event = monitoring.CommandFailedEvent( delta, {"ok": 0}, "ping", request_id, connection_id, operation_id, database_name=db_name @@ -1180,7 +1181,7 @@ def test_command_event_repr(self): repr(event), "", + "failure: {'ok': 0}, service_id: None, server_connection_id: None>", ) def test_server_heartbeat_event_repr(self): @@ -1191,7 +1192,9 @@ def test_server_heartbeat_event_repr(self): ) delta = 0.1 event = monitoring.ServerHeartbeatSucceededEvent( - delta, {"ok": 1}, connection_id # type: ignore[arg-type] + delta, + {"ok": 1}, # type: 
ignore[arg-type] + connection_id, ) self.assertEqual( repr(event), @@ -1199,7 +1202,9 @@ def test_server_heartbeat_event_repr(self): "duration: 0.1, awaited: False, reply: {'ok': 1}>", ) event = monitoring.ServerHeartbeatFailedEvent( - delta, "ERROR", connection_id # type: ignore[arg-type] + delta, + "ERROR", # type: ignore[arg-type] + connection_id, ) self.assertEqual( repr(event), @@ -1216,7 +1221,10 @@ def test_server_event_repr(self): "", ) event = monitoring.ServerDescriptionChangedEvent( - "PREV", "NEW", server_address, topology_id # type: ignore[arg-type] + "PREV", # type: ignore[arg-type] + "NEW", # type: ignore[arg-type] + server_address, + topology_id, ) self.assertEqual( repr(event), @@ -1233,7 +1241,9 @@ def test_topology_event_repr(self): event = monitoring.TopologyOpenedEvent(topology_id) self.assertEqual(repr(event), "") event = monitoring.TopologyDescriptionChangedEvent( - "PREV", "NEW", topology_id # type: ignore[arg-type] + "PREV", # type: ignore[arg-type] + "NEW", # type: ignore[arg-type] + topology_id, ) self.assertEqual( repr(event), diff --git a/test/test_pymongo.py b/test/test_pymongo.py index d4203ed5cf..4247520c22 100644 --- a/test/test_pymongo.py +++ b/test/test_pymongo.py @@ -22,6 +22,7 @@ from test import unittest import pymongo +from pymongo._version import get_version_tuple class TestPyMongo(unittest.TestCase): @@ -29,6 +30,14 @@ def test_mongo_client_alias(self): # Testing that pymongo module imports mongo_client.MongoClient self.assertEqual(pymongo.MongoClient, pymongo.mongo_client.MongoClient) + def test_get_version_tuple(self): + self.assertEqual(get_version_tuple("4.8.0.dev1"), (4, 8, 0, ".dev1")) + self.assertEqual(get_version_tuple("4.8.1"), (4, 8, 1)) + self.assertEqual(get_version_tuple("5.0.0rc1"), (5, 0, 0, "rc1")) + self.assertEqual(get_version_tuple("5.0"), (5, 0)) + with self.assertRaises(ValueError): + get_version_tuple("5") + if __name__ == "__main__": unittest.main() diff --git a/test/test_raw_bson.py 
b/test/test_raw_bson.py index 38b4dd197a..11bc80dd9f 100644 --- a/test/test_raw_bson.py +++ b/test/test_raw_bson.py @@ -22,7 +22,7 @@ from test import client_context, unittest from test.test_client import IntegrationTest -from bson import decode, encode +from bson import Code, DBRef, decode, encode from bson.binary import JAVA_LEGACY, Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.errors import InvalidBSON @@ -31,7 +31,6 @@ class TestRawBSONDocument(IntegrationTest): - # {'_id': ObjectId('556df68b6e32ab21a95e0785'), # 'name': 'Sherlock', # 'addresses': [{'street': 'Baker Street'}]} @@ -200,6 +199,20 @@ def test_preserve_key_ordering(self): for rkey, elt in zip(rawdoc, keyvaluepairs): self.assertEqual(rkey, elt[0]) + def test_contains_code_with_scope(self): + doc = RawBSONDocument(encode({"value": Code("x=1", scope={})})) + + self.assertEqual(decode(encode(doc)), {"value": Code("x=1", {})}) + self.assertEqual(doc["value"].scope, RawBSONDocument(encode({}))) + + def test_contains_dbref(self): + doc = RawBSONDocument(encode({"value": DBRef("test", "id")})) + raw = {"$ref": "test", "$id": "id"} + raw_encoded = encode(decode(encode(raw))) + + self.assertEqual(decode(encode(doc)), {"value": DBRef("test", "id")}) + self.assertEqual(doc["value"].raw, raw_encoded) + if __name__ == "__main__": unittest.main() diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 986785faf6..2d6a3e9f1b 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -20,6 +20,9 @@ import pickle import random import sys +from typing import Any + +from pymongo.operations import _Op sys.path[0:0] = [""] @@ -135,7 +138,6 @@ def assertReadsFrom(self, expected, **kwargs): class TestSingleSecondaryOk(TestReadPreferencesBase): def test_reads_from_secondary(self): - host, port = next(iter(self.client.secondaries)) # Direct connection to a secondary. 
client = single_client(host, port) @@ -267,7 +269,7 @@ def test_nearest(self): not_used = data_members.difference(used) latencies = ", ".join( "%s: %sms" % (server.description.address, server.description.round_trip_time) - for server in c._get_topology().select_servers(readable_server_selector) + for server in c._get_topology().select_servers(readable_server_selector, _Op.TEST) ) self.assertFalse( @@ -285,8 +287,8 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **client_options) @contextlib.contextmanager - def _conn_for_reads(self, read_preference, session): - context = super()._conn_for_reads(read_preference, session) + def _conn_for_reads(self, read_preference, session, operation): + context = super()._conn_for_reads(read_preference, session, operation) with context as (conn, read_preference): self.record_a_read(conn.address) yield conn, read_preference @@ -299,7 +301,7 @@ def _conn_from_server(self, read_preference, server, session): yield conn, read_preference def record_a_read(self, address): - server = self._get_topology().select_server_by_address(address, 0) + server = self._get_topology().select_server_by_address(address, _Op.TEST, 0) self.has_read_from.add(server) @@ -321,7 +323,6 @@ class TestCommandAndReadPreference(IntegrationTest): def setUpClass(cls): super().setUpClass() cls.c = ReadPrefTester( - client_context.pair, # Ignore round trip times, to test ReadPreference modes only. 
localThresholdMS=1000 * 1000, ) @@ -442,7 +443,6 @@ def test_moving_average(self): class TestMongosAndReadPreference(IntegrationTest): def test_read_preference_document(self): - pref = Primary() self.assertEqual(pref.document, {"mode": "primary"}) @@ -514,7 +514,7 @@ def test_read_preference_document_hedge(self): else: self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) - hedge = {"enabled": True} + hedge: dict[str, Any] = {"enabled": True} pref = cls(hedge=hedge) self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) out = _maybe_add_read_preference({}, pref) @@ -559,7 +559,6 @@ def test_send_hedge(self): self.assertNotIn("$readPreference", cmd) def test_maybe_add_read_preference(self): - # Primary doesn't add $readPreference out = _maybe_add_read_preference({}, Primary()) self.assertEqual(out, {}) diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index 8779ea1ed8..e3028688d7 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -20,6 +20,9 @@ import sys import threading +from bson import SON +from pymongo.errors import AutoReconnect + sys.path[0:0] = [""] from test import ( @@ -31,9 +34,12 @@ ) from test.utils import ( CMAPListener, + EventListener, OvertCommandListener, SpecTestCreator, + rs_client, rs_or_single_client, + set_fail_point, ) from test.utils_spec_runner import SpecRunner @@ -221,5 +227,48 @@ def test_pool_paused_error_is_retryable(self): self.assertEqual(1, len(failed), msg) +class TestRetryableReads(IntegrationTest): + @client_context.require_multiple_mongoses + @client_context.require_failCommand_fail_point + def test_retryable_reads_in_sharded_cluster_multiple_available(self): + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["find"], + "closeConnection": True, + "appName": "retryableReadTest", + }, + } + + mongos_clients = [] + + for mongos in client_context.mongos_seeds().split(","): + client = 
rs_or_single_client(mongos) + set_fail_point(client, fail_command) + self.addCleanup(client.close) + mongos_clients.append(client) + + listener = OvertCommandListener() + client = rs_or_single_client( + client_context.mongos_seeds(), + appName="retryableReadTest", + event_listeners=[listener], + retryReads=True, + ) + + with self.fail_point(fail_command): + with self.assertRaises(AutoReconnect): + client.t.t.find_one({}) + + # Disable failpoints on each mongos + for client in mongos_clients: + fail_command["mode"] = "off" + set_fail_point(client, fail_command) + + self.assertEqual(len(listener.failed_events), 2) + self.assertEqual(len(listener.succeeded_events), 0) + + if __name__ == "__main__": unittest.main() diff --git a/test/test_retryable_reads_unified.py b/test/test_retryable_reads_unified.py index 69bee081a5..6d196aaabd 100644 --- a/test/test_retryable_reads_unified.py +++ b/test/test_retryable_reads_unified.py @@ -15,8 +15,8 @@ """Test the Retryable Reads unified spec tests.""" from __future__ import annotations -import os import sys +from pathlib import Path sys.path[0:0] = [""] @@ -24,7 +24,7 @@ from test.unified_format import generate_test_classes # Location of JSON test specifications. -TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_reads", "unified") +TEST_PATH = Path(__file__).parent / "retryable_reads/unified" # Generate unified tests. 
globals().update(generate_test_classes(TEST_PATH, module=__name__)) diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 2da6f53f4b..ccc6b12e01 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -31,6 +31,7 @@ OvertCommandListener, SpecTestCreator, rs_or_single_client, + set_fail_point, ) from test.utils_spec_runner import SpecRunner from test.version import Version @@ -40,6 +41,7 @@ from bson.raw_bson import RawBSONDocument from bson.son import SON from pymongo.errors import ( + AutoReconnect, ConnectionFailure, OperationFailure, ServerSelectionTimeoutError, @@ -437,7 +439,7 @@ def test_batch_splitting_retry_fails(self): ) self.listener.reset() with self.client.start_session() as session: - initial_txn = session._server_session._transaction_id + initial_txn = session._transaction_id try: coll.bulk_write( [ @@ -465,10 +467,50 @@ def test_batch_splitting_retry_fails(self): started[1].command.pop("$clusterTime") started[2].command.pop("$clusterTime") self.assertEqual(started[1].command, started[2].command) - final_txn = session._server_session._transaction_id + final_txn = session._transaction_id self.assertEqual(final_txn, expected_txn) self.assertEqual(coll.find_one(projection={"_id": True}), {"_id": 1}) + @client_context.require_multiple_mongoses + @client_context.require_failCommand_fail_point + def test_retryable_writes_in_sharded_cluster_multiple_available(self): + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "closeConnection": True, + "appName": "retryableWriteTest", + }, + } + + mongos_clients = [] + + for mongos in client_context.mongos_seeds().split(","): + client = rs_or_single_client(mongos) + set_fail_point(client, fail_command) + self.addCleanup(client.close) + mongos_clients.append(client) + + listener = OvertCommandListener() + client = rs_or_single_client( + client_context.mongos_seeds(), + 
appName="retryableWriteTest", + event_listeners=[listener], + retryWrites=True, + ) + + with self.assertRaises(AutoReconnect): + client.t.t.insert_one({"x": 1}) + + # Disable failpoints on each mongos + for client in mongos_clients: + fail_command["mode"] = "off" + set_fail_point(client, fail_command) + + self.assertEqual(len(listener.failed_events), 2) + self.assertEqual(len(listener.succeeded_events), 0) + class TestWriteConcernError(IntegrationTest): RUN_ON_LOAD_BALANCER = True @@ -519,7 +561,7 @@ def test_RetryableWriteError_error_label_RawBSONDocument(self): "insert", "testcoll", documents=[{"_id": 1}], - txnNumber=s._server_session.transaction_id, + txnNumber=s._transaction_id, session=s, codec_options=DEFAULT_CODEC_OPTIONS.with_options( document_class=RawBSONDocument @@ -670,7 +712,7 @@ def raise_connection_err_select_server(*args, **kwargs): kwargs = copy.deepcopy(kwargs) kwargs["session"] = session msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" - initial_txn_id = session._server_session.transaction_id + initial_txn_id = session._transaction_id # Each operation should fail on the first attempt and succeed # on the second. 
@@ -678,7 +720,7 @@ def raise_connection_err_select_server(*args, **kwargs): self.assertEqual(len(listener.started_events), 1, msg) retry_cmd = listener.started_events[0].command sent_txn_id = retry_cmd["txnNumber"] - final_txn_id = session._server_session.transaction_id + final_txn_id = session._transaction_id self.assertEqual(Int64(initial_txn_id + 1), sent_txn_id, msg) self.assertEqual(sent_txn_id, final_txn_id, msg) diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index f687eab313..105ffaf034 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -83,19 +83,20 @@ def compare_events(expected_dict, actual): if expected["address"] != "{}:{}".format(*actual.server_address): return ( False, - "ServerOpeningEvent published with wrong address (expected" - " {}, got {}".format(expected["address"], actual.server_address), + "ServerOpeningEvent published with wrong address (expected" " {}, got {}".format( + expected["address"], actual.server_address + ), ) elif expected_type == "server_description_changed_event": - if not isinstance(actual, monitoring.ServerDescriptionChangedEvent): return (False, "Expected ServerDescriptionChangedEvent, got %s" % (actual.__class__)) if expected["address"] != "{}:{}".format(*actual.server_address): return ( False, - "ServerDescriptionChangedEvent has wrong address" - " (expected {}, got {}".format(expected["address"], actual.server_address), + "ServerDescriptionChangedEvent has wrong address" " (expected {}, got {}".format( + expected["address"], actual.server_address + ), ) if not compare_server_descriptions(expected["newDescription"], actual.new_description): @@ -114,8 +115,9 @@ def compare_events(expected_dict, actual): if expected["address"] != "{}:{}".format(*actual.server_address): return ( False, - "ServerClosedEvent published with wrong address" - " (expected {}, got {}".format(expected["address"], actual.server_address), + "ServerClosedEvent published with 
wrong address" " (expected {}, got {}".format( + expected["address"], actual.server_address + ), ) elif expected_type == "topology_opening_event": @@ -197,7 +199,7 @@ def _run(self): try: for phase in scenario_def["phases"]: - for (source, response) in phase.get("responses", []): + for source, response in phase.get("responses", []): source_address = clean_node(source) topology.on_change( ServerDescription( diff --git a/test/test_server_selection.py b/test/test_server_selection.py index 01f19ad87f..30a8aaa7a2 100644 --- a/test/test_server_selection.py +++ b/test/test_server_selection.py @@ -21,6 +21,7 @@ from pymongo import MongoClient, ReadPreference from pymongo.errors import ServerSelectionTimeoutError from pymongo.hello import HelloCompat +from pymongo.operations import _Op from pymongo.server_selectors import writable_server_selector from pymongo.settings import TopologySettings from pymongo.topology import Topology @@ -158,7 +159,7 @@ def test_latency_threshold_application(self): # Invoke server selection and assert no filtering based on latency # prior to custom server selection logic kicking in. - server = topology.select_server(ReadPreference.NEAREST) + server = topology.select_server(ReadPreference.NEAREST, _Op.TEST) assert selector.selection is not None self.assertEqual(len(selector.selection), len(topology.description.server_descriptions())) @@ -193,7 +194,7 @@ def test_server_selector_bypassed(self): # Invoke server selection and assert no calls to our custom selector. 
with self.assertRaisesRegex(ServerSelectionTimeoutError, "No primary available for writes"): - topology.select_server(writable_server_selector, server_selection_timeout=0.1) + topology.select_server(writable_server_selector, _Op.TEST, server_selection_timeout=0.1) self.assertEqual(selector.call_count, 0) diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py index 52873882f0..9dced595c9 100644 --- a/test/test_server_selection_in_window.py +++ b/test/test_server_selection_in_window.py @@ -28,6 +28,7 @@ from test.utils_selection_tests import create_topology from pymongo.common import clean_node +from pymongo.operations import _Op from pymongo.read_preferences import ReadPreference # Location of JSON test specifications. @@ -52,7 +53,7 @@ def run_scenario(self, scenario_def): # Number of times to repeat server selection iterations = scenario_def["iterations"] for _ in range(iterations): - server = topology.select_server(pref, server_selection_timeout=0) + server = topology.select_server(pref, _Op.TEST, server_selection_timeout=0) counts[server.description.address] += 1 # Verify expected_frequencies diff --git a/test/test_server_selection_logging.py b/test/test_server_selection_logging.py new file mode 100644 index 0000000000..2df749cb10 --- /dev/null +++ b/test/test_server_selection_logging.py @@ -0,0 +1,39 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Run the server selection logging unified format spec tests.""" +from __future__ import annotations + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +# Location of JSON test specifications. +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "server_selection_logging") + + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_session.py b/test/test_session.py index c95691be15..c5cf77b754 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -116,16 +116,16 @@ def _test_ops(self, client, *ops): for f, args, kw in ops: with client.start_session() as s: + listener.reset() + s._materialize() last_use = s._server_session.last_use start = time.monotonic() self.assertLessEqual(last_use, start) - listener.reset() # In case "f" modifies its inputs. args = copy.copy(args) kw = copy.copy(kw) kw["session"] = s f(*args, **kw) - self.assertGreaterEqual(s._server_session.last_use, start) self.assertGreaterEqual(len(listener.started_events), 1) for event in listener.started_events: self.assertTrue( @@ -274,6 +274,8 @@ def test_end_sessions(self): client = rs_or_single_client(event_listeners=[listener]) # Start many sessions. 
sessions = [client.start_session() for _ in range(_MAX_END_SESSIONS + 1)] + for s in sessions: + s._materialize() for s in sessions: s.end_session() diff --git a/test/test_son.py b/test/test_son.py index 579d765d8e..a06d92bcb2 100644 --- a/test/test_son.py +++ b/test/test_son.py @@ -85,7 +85,6 @@ def test_to_dict(self): self.assertEqual(SON, d4["blah"]["foo"].__class__) def test_pickle(self): - simple_son = SON([]) complex_son = SON([("son", simple_son), ("list", [simple_son, simple_son])]) diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index d7e9106626..29283f0ff2 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -28,7 +28,7 @@ from pymongo import common from pymongo.errors import ConfigurationError from pymongo.mongo_client import MongoClient -from pymongo.srv_resolver import _HAVE_DNSPYTHON +from pymongo.srv_resolver import _have_dnspython WAIT_TIME = 0.1 @@ -87,7 +87,6 @@ def __exit__(self, exc_type, exc_val, exc_tb): class TestSrvPolling(unittest.TestCase): - BASE_SRV_RESPONSE = [ ("localhost.test.build.10gen.cc", 27017), ("localhost.test.build.10gen.cc", 27018), @@ -96,8 +95,6 @@ class TestSrvPolling(unittest.TestCase): CONNECTION_STRING = "mongodb+srv://test1.test.build.10gen.cc" def setUp(self): - if not _HAVE_DNSPYTHON: - raise unittest.SkipTest("SRV polling tests require the dnspython module") # Patch timeouts to ensure short rescan SRV interval. self.client_knobs = client_knobs( heartbeat_frequency=WAIT_TIME, @@ -112,7 +109,7 @@ def tearDown(self): def get_nodelist(self, client): return client._topology.description.server_descriptions().keys() - def assert_nodelist_change(self, expected_nodelist, client): + def assert_nodelist_change(self, expected_nodelist, client, timeout=(100 * WAIT_TIME)): """Check if the client._topology eventually sees all nodes in the expected_nodelist. 
""" @@ -123,9 +120,9 @@ def predicate(): return True return False - wait_until(predicate, "see expected nodelist", timeout=100 * WAIT_TIME) + wait_until(predicate, "see expected nodelist", timeout=timeout) - def assert_nodelist_nochange(self, expected_nodelist, client): + def assert_nodelist_nochange(self, expected_nodelist, client, timeout=(100 * WAIT_TIME)): """Check if the client._topology ever deviates from seeing all nodes in the expected_nodelist. Consistency is checked after sleeping for (WAIT_TIME * 10) seconds. Also check that the resolver is called at @@ -137,7 +134,7 @@ def predicate(): return pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count >= 1 return False - wait_until(predicate, "Node list equals expected nodelist", timeout=100 * WAIT_TIME) + wait_until(predicate, "Node list equals expected nodelist", timeout=timeout) nodelist = self.get_nodelist(client) if set(expected_nodelist) != set(nodelist): @@ -151,6 +148,7 @@ def predicate(): return True def run_scenario(self, dns_response, expect_change): + self.assertEqual(_have_dnspython(), True) if callable(dns_response): dns_resolver_response = dns_response else: @@ -331,6 +329,28 @@ def nodelist_callback(): with SrvPollingKnobs(nodelist_callback=nodelist_callback): self.assert_nodelist_change(response, client) + def test_srv_waits_to_poll(self): + modified = [("localhost.test.build.10gen.cc", 27019)] + + def resolver_response(): + return modified + + with SrvPollingKnobs( + ttl_time=WAIT_TIME, + min_srv_rescan_interval=WAIT_TIME, + nodelist_callback=resolver_response, + ): + client = MongoClient(self.CONNECTION_STRING) + self.assertRaises( + AssertionError, self.assert_nodelist_change, modified, client, timeout=WAIT_TIME / 2 + ) + + def test_import_dns_resolver(self): + # Regression test for PYTHON-4407 + import dns.resolver + + self.assertTrue(hasattr(dns.resolver, "resolve")) + if __name__ == "__main__": unittest.main() diff --git a/test/test_ssl.py b/test/test_ssl.py index 
bde385138c..3b307df39e 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -447,7 +447,8 @@ def test_validation_with_system_ca_certs(self): # Server cert and hostname are verified. connected( MongoClient( - "mongodb://localhost/?ssl=true&serverSelectionTimeoutMS=1000", **self.credentials # type: ignore[arg-type] + "mongodb://localhost/?ssl=true&serverSelectionTimeoutMS=1000", + **self.credentials, # type: ignore[arg-type] ) ) diff --git a/test/test_topology.py b/test/test_topology.py index 88c99d2a28..7662a0c028 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -17,6 +17,8 @@ import sys +from pymongo.operations import _Op + sys.path[0:0] = [""] from test import client_knobs, unittest @@ -30,11 +32,12 @@ from pymongo.monitor import Monitor from pymongo.pool import PoolOptions from pymongo.read_preferences import ReadPreference, Secondary +from pymongo.server import Server from pymongo.server_description import ServerDescription from pymongo.server_selectors import any_server_selector, writable_server_selector from pymongo.server_type import SERVER_TYPE from pymongo.settings import TopologySettings -from pymongo.topology import Topology, _ErrorContext +from pymongo.topology import Topology, _ErrorContext, _filter_servers from pymongo.topology_description import TOPOLOGY_TYPE @@ -169,7 +172,7 @@ def test_direct_connection(self): # Can't select a server while the only server is of type Unknown. with self.assertRaisesRegex(ConnectionFailure, "No servers found yet"): - t.select_servers(any_server_selector, server_selection_timeout=0) + t.select_servers(any_server_selector, _Op.TEST, server_selection_timeout=0) got_hello(t, address, hello_response) @@ -178,7 +181,7 @@ def test_direct_connection(self): # No matter whether the server is writable, # select_servers() returns it. 
- s = t.select_server(writable_server_selector) + s = t.select_server(writable_server_selector, _Op.TEST) self.assertEqual(server_type, s.description.server_type) # Topology type single is always readable and writable regardless @@ -216,7 +219,7 @@ def _check_with_socket(self, *args, **kwargs): t = create_mock_topology(monitor_class=TestMonitor) self.addCleanup(t.close) - s = t.select_server(writable_server_selector) + s = t.select_server(writable_server_selector, _Op.TEST) self.assertEqual(125, s.description.round_trip_time) round_trip_time = 25 @@ -231,7 +234,7 @@ def _check_with_socket(self, *args, **kwargs): def raises_err(): try: - t.select_server(writable_server_selector, server_selection_timeout=0.1) + t.select_server(writable_server_selector, _Op.TEST, server_selection_timeout=0.1) except ConnectionFailure: return True else: @@ -452,7 +455,9 @@ def test_discover_set_name_from_primary(self): # Discovering a replica set without the setName supplied by the user # is not yet supported by MongoClient, but Topology can do it. topology_settings = SetNameDiscoverySettings( - seeds=[address], pool_class=MockPool, monitor_class=DummyMonitor # type: ignore[arg-type] + seeds=[address], + pool_class=MockPool, # type: ignore[arg-type] + monitor_class=DummyMonitor, # type: ignore[arg-type] ) t = Topology(topology_settings) @@ -480,7 +485,9 @@ def test_discover_set_name_from_secondary(self): # Discovering a replica set without the setName supplied by the user # is not yet supported by MongoClient, but Topology can do it. 
topology_settings = SetNameDiscoverySettings( - seeds=[address], pool_class=MockPool, monitor_class=DummyMonitor # type: ignore[arg-type] + seeds=[address], + pool_class=MockPool, # type: ignore[arg-type] + monitor_class=DummyMonitor, # type: ignore[arg-type] ) t = Topology(topology_settings) @@ -530,7 +537,7 @@ def test_wire_version(self): self.assertEqual(server.description.min_wire_version, 1) self.assertEqual(server.description.max_wire_version, 6) - t.select_servers(any_server_selector) + t.select_servers(any_server_selector, _Op.TEST) # Incompatible. got_hello( @@ -547,7 +554,7 @@ def test_wire_version(self): ) try: - t.select_servers(any_server_selector) + t.select_servers(any_server_selector, _Op.TEST) except ConfigurationError as e: # Error message should say which server failed and why. self.assertEqual( @@ -573,7 +580,7 @@ def test_wire_version(self): ) try: - t.select_servers(any_server_selector) + t.select_servers(any_server_selector, _Op.TEST) except ConfigurationError as e: # Error message should say which server failed and why. 
self.assertEqual( @@ -589,7 +596,7 @@ def test_max_write_batch_size(self): t = create_mock_topology(seeds=["a", "b"], replica_set_name="rs") def write_batch_size(): - s = t.select_server(writable_server_selector) + s = t.select_server(writable_server_selector, _Op.TEST) return s.description.max_write_batch_size got_hello( @@ -681,6 +688,23 @@ def test_unexpected_load_balancer(self): self.assertNotIn(("a", 27017), t.description.server_descriptions()) self.assertEqual(t.description.topology_type_name, "Unknown") + def test_filtered_server_selection(self): + s1 = Server(ServerDescription(("localhost", 27017)), pool=object(), monitor=object()) # type: ignore[arg-type] + s2 = Server(ServerDescription(("localhost2", 27017)), pool=object(), monitor=object()) # type: ignore[arg-type] + servers = [s1, s2] + + result = _filter_servers(servers, deprioritized_servers=[s2]) + self.assertEqual(result, [s1]) + + result = _filter_servers(servers, deprioritized_servers=[s1, s2]) + self.assertEqual(result, servers) + + result = _filter_servers(servers, deprioritized_servers=[]) + self.assertEqual(result, servers) + + result = _filter_servers(servers) + self.assertEqual(result, servers) + def wait_for_primary(topology): """Wait for a Topology to discover a writable server. @@ -693,7 +717,7 @@ def wait_for_primary(topology): def get_primary(): try: - return topology.select_server(writable_server_selector, 0) + return topology.select_server(writable_server_selector, _Op.TEST, 0) except ConnectionFailure: return None @@ -749,7 +773,7 @@ def _check_with_socket(self, *args, **kwargs): # The third hello call (the immediate retry) happens sometime soon # after the failed check triggered by request_check_all. Wait until # the server becomes known again. 
- server = t.select_server(writable_server_selector, 0.250) + server = t.select_server(writable_server_selector, _Op.TEST, 0.250) self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) self.assertEqual(3, hello_count[0]) @@ -763,13 +787,13 @@ def _check_with_socket(self, *args, **kwargs): t = create_mock_topology(monitor_class=TestMonitor) self.addCleanup(t.close) with self.assertRaisesRegex(ConnectionFailure, "internal error"): - t.select_server(any_server_selector, server_selection_timeout=0.5) + t.select_server(any_server_selector, _Op.TEST, server_selection_timeout=0.5) class TestServerSelectionErrors(TopologyTest): def assertMessage(self, message, topology, selector=any_server_selector): with self.assertRaises(ConnectionFailure) as context: - topology.select_server(selector, server_selection_timeout=0) + topology.select_server(selector, _Op.TEST, server_selection_timeout=0) self.assertIn(message, str(context.exception)) diff --git a/test/test_transactions.py b/test/test_transactions.py index 64b93f0b54..797b2e3740 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -50,9 +50,6 @@ from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference -# Location of JSON test specifications. 
-TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "transactions", "legacy") - _TXN_TESTS_DEBUG = os.environ.get("TRANSACTION_TESTS_DEBUG") # Max number of operations to perform after a transaction to prove unpinning @@ -410,10 +407,6 @@ def __exit__(self, exc_type, exc_val, exc_tb): class TestTransactionsConvenientAPI(TransactionsBase): - TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "transactions-convenient-api" - ) - @client_context.require_transactions def test_callback_raises_custom_error(self): class _MyException(Exception): @@ -576,23 +569,5 @@ def callback(session): self.assertFalse(s.in_transaction) -def create_test(scenario_def, test, name): - @client_context.require_test_commands - @client_context.require_transactions - def run_scenario(self): - self.run_scenario(scenario_def, test) - - return run_scenario - - -test_creator = SpecTestCreator(create_test, TestTransactions, TEST_PATH) -test_creator.create_tests() - - -SpecTestCreator( - create_test, TestTransactionsConvenientAPI, TestTransactionsConvenientAPI.TEST_PATH -).create_tests() - - if __name__ == "__main__": unittest.main() diff --git a/test/test_transactions_unified.py b/test/test_transactions_unified.py index 6de4902a81..81137bf658 100644 --- a/test/test_transactions_unified.py +++ b/test/test_transactions_unified.py @@ -20,14 +20,28 @@ sys.path[0:0] = [""] -from test import unittest +from test import client_context, unittest from test.unified_format import generate_test_classes + +@client_context.require_no_mmap +def setUpModule(): + pass + + # Location of JSON test specifications. TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "transactions", "unified") # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) +# Location of JSON test specifications for transactions-convenient-api. 
+TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "transactions-convenient-api", "unified" +) + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + if __name__ == "__main__": unittest.main() diff --git a/test/test_typing.py b/test/test_typing.py index 3d6156ce2c..ae395c02e6 100644 --- a/test/test_typing.py +++ b/test/test_typing.py @@ -21,7 +21,18 @@ import sys import tempfile import unittest -from typing import TYPE_CHECKING, Any, Dict, Iterable, Iterator, List, Union +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + Iterator, + List, + TypeVar, + Union, + cast, +) try: from typing_extensions import NotRequired, TypedDict @@ -77,13 +88,16 @@ def get_tests() -> Iterable[str]: yield os.path.join(dirpath, filename) -def only_type_check(func): +FuncT = TypeVar("FuncT", bound=Callable[..., None]) + + +def only_type_check(func: FuncT) -> FuncT: def inner(*args, **kwargs): if not TYPE_CHECKING: raise unittest.SkipTest("Used for Type Checking Only") func(*args, **kwargs) - return inner + return cast(FuncT, inner) class TestMypyFails(unittest.TestCase): diff --git a/test/test_unified_format.py b/test/test_unified_format.py index bc6dbcc5c2..1b3a134237 100644 --- a/test/test_unified_format.py +++ b/test/test_unified_format.py @@ -15,6 +15,7 @@ import os import sys +from typing import Any sys.path[0:0] = [""] @@ -58,7 +59,7 @@ def setUp(self): self.match_evaluator = MatchEvaluatorUtil(self) def test_unsetOrMatches(self): - spec = {"$$unsetOrMatches": {"y": {"$$unsetOrMatches": 2}}} + spec: dict[str, Any] = {"$$unsetOrMatches": {"y": {"$$unsetOrMatches": 2}}} for actual in [{}, {"y": 2}, None]: self.match_evaluator.match_result(spec, actual) diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index e1e59eb651..27f5fd2fbc 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -18,6 +18,7 @@ import copy import sys import warnings +from typing import Any 
from urllib.parse import quote_plus sys.path[0:0] = [""] @@ -27,7 +28,12 @@ from bson.binary import JAVA_LEGACY from pymongo import ReadPreference from pymongo.errors import ConfigurationError, InvalidURI -from pymongo.uri_parser import parse_uri, parse_userinfo, split_hosts, split_options +from pymongo.uri_parser import ( + parse_uri, + parse_userinfo, + split_hosts, + split_options, +) class TestURI(unittest.TestCase): @@ -143,6 +149,12 @@ def test_split_options(self): self.assertEqual({"authsource": "foobar"}, split_options("authSource=foobar")) self.assertEqual({"maxpoolsize": 50}, split_options("maxpoolsize=50")) + # Test suggestions given when invalid kwarg passed + + expected = r"Unknown option: auth. Did you mean one of \(authsource, authmechanism, timeoutms\) or maybe a camelCase version of one\? Refer to docstring." + with self.assertRaisesRegex(ConfigurationError, expected): + split_options("auth=GSSAPI") + def test_parse_uri(self): self.assertRaises(InvalidURI, parse_uri, "http://foobar.com") self.assertRaises(InvalidURI, parse_uri, "http://foo@foobar.com") @@ -462,15 +474,15 @@ def test_normalize_options(self): res = {"tls": True, "appname": "myapp"} self.assertEqual(res, parse_uri(uri)["options"]) - def test_unquote_after_parsing(self): - quoted_val = "val%21%40%23%24%25%5E%26%2A%28%29_%2B%2C%3A+etc" - unquoted_val = "val!@#$%^&*()_+,: etc" + def test_unquote_during_parsing(self): + quoted_val = "val%21%40%23%24%25%5E%26%2A%28%29_%2B%3A+etc" + unquoted_val = "val!@#$%^&*()_+: etc" uri = ( "mongodb://user:password@localhost/?authMechanism=MONGODB-AWS" "&authMechanismProperties=AWS_SESSION_TOKEN:" + quoted_val ) res = parse_uri(uri) - options = { + options: dict[str, Any] = { "authmechanism": "MONGODB-AWS", "authmechanismproperties": {"AWS_SESSION_TOKEN": unquoted_val}, } @@ -492,20 +504,30 @@ def test_unquote_after_parsing(self): self.assertEqual(options, res["options"]) def test_redact_AWS_SESSION_TOKEN(self): - unquoted_colon = "token:" + token = 
"token" uri = ( "mongodb://user:password@localhost/?authMechanism=MONGODB-AWS" - "&authMechanismProperties=AWS_SESSION_TOKEN:" + unquoted_colon + "&authMechanismProperties=AWS_SESSION_TOKEN-" + token ) with self.assertRaisesRegex( ValueError, - "auth mechanism properties must be key:value pairs like " - "SERVICE_NAME:mongodb, not AWS_SESSION_TOKEN:" - ", did you forget to percent-escape the token with " - "quote_plus?", + "Malformed auth mechanism properties", ): parse_uri(uri) + def test_handle_colon(self): + token = "token:foo" + uri = ( + "mongodb://user:password@localhost/?authMechanism=MONGODB-AWS" + "&authMechanismProperties=AWS_SESSION_TOKEN:" + token + ) + res = parse_uri(uri) + options = { + "authmechanism": "MONGODB-AWS", + "authMechanismProperties": {"AWS_SESSION_TOKEN": token}, + } + self.assertEqual(options, res["options"]) + def test_special_chars(self): user = "user@ /9+:?~!$&'()*+,;=" pwd = "pwd@ /9+:?~!$&'()*+,;=" diff --git a/test/test_uri_spec.py b/test/test_uri_spec.py index ad48fe787c..f483a03842 100644 --- a/test/test_uri_spec.py +++ b/test/test_uri_spec.py @@ -27,8 +27,7 @@ from test import clear_warning_registry, unittest from pymongo.common import INTERNAL_URI_OPTION_NAME_MAP, validate -from pymongo.compression_support import _HAVE_SNAPPY -from pymongo.srv_resolver import _HAVE_DNSPYTHON +from pymongo.compression_support import _have_snappy from pymongo.uri_parser import SRV_SCHEME, parse_uri CONN_STRING_TEST_PATH = os.path.join( @@ -83,7 +82,9 @@ def workdir_context_decorator(func): def modified_test_scenario(*args, **kwargs): original_workdir = os.getcwd() os.chdir(target_workdir) - func(*args, **kwargs) + with warnings.catch_warnings(): + warnings.simplefilter("default") + func(*args, **kwargs) os.chdir(original_workdir) return modified_test_scenario @@ -94,10 +95,8 @@ def modified_test_scenario(*args, **kwargs): def create_test(test, test_workdir): def run_scenario(self): compressors = (test.get("options") or {}).get("compressors", []) 
- if "snappy" in compressors and not _HAVE_SNAPPY: + if "snappy" in compressors and not _have_snappy(): self.skipTest("This test needs the snappy module.") - if test["uri"].startswith(SRV_SCHEME) and not _HAVE_DNSPYTHON: - self.skipTest("This test needs dnspython package.") valid = True warning = False expected_warning = test.get("warning", False) diff --git a/test/transactions-convenient-api/callback-aborts.json b/test/transactions-convenient-api/callback-aborts.json deleted file mode 100644 index 2a3038e8ba..0000000000 --- a/test/transactions-convenient-api/callback-aborts.json +++ /dev/null @@ -1,244 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "withTransaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "withTransaction succeeds if callback aborts", - "useMultipleMongoses": true, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - 
"command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "withTransaction succeeds if callback aborts with no ops", - "useMultipleMongoses": true, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "abortTransaction", - "object": "session0" - } - ] - } - } - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "withTransaction still succeeds if callback aborts and runs extra op", - "useMultipleMongoses": true, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - 
"documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "autocommit": null, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 2 - } - ] - } - } - } - ] -} diff --git a/test/transactions-convenient-api/callback-commits.json b/test/transactions-convenient-api/callback-commits.json deleted file mode 100644 index 4abbbdd0e6..0000000000 --- a/test/transactions-convenient-api/callback-commits.json +++ /dev/null @@ -1,303 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "withTransaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "withTransaction succeeds if callback commits", - "useMultipleMongoses": true, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": 
"test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "withTransaction still succeeds if callback commits and runs extra op", - "useMultipleMongoses": true, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - 
"command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 3 - } - ], - "ordered": true, - "lsid": "session0", - "autocommit": null, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ] - } - } - } - ] -} diff --git a/test/transactions-convenient-api/callback-retry.json b/test/transactions-convenient-api/callback-retry.json deleted file mode 100644 index a0391c1b5d..0000000000 --- a/test/transactions-convenient-api/callback-retry.json +++ /dev/null @@ -1,315 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "withTransaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "callback succeeds after multiple connection errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - 
"arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": 
"withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "callback is not retried after non-transient error (DuplicateKeyError)", - "useMultipleMongoses": true, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError", - "UnknownTransactionCommitResult" - ] - } - } - ] - } - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError", - "UnknownTransactionCommitResult" - ], - "errorContains": "E11000" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "insert", - 
"database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - } - ] -} diff --git a/test/transactions-convenient-api/commit-retry.json b/test/transactions-convenient-api/commit-retry.json deleted file mode 100644 index 02e38460d0..0000000000 --- a/test/transactions-convenient-api/commit-retry.json +++ /dev/null @@ -1,531 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "withTransaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "commitTransaction succeeds after multiple connection errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - 
"commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction retry only overwrites write concern w option", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - }, - "options": { - "writeConcern": { - "w": 2, - "j": true, - "wtimeout": 5000 - } - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - 
"readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": 2, - "j": true, - "wtimeout": 5000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": "majority", - "j": true, - "wtimeout": 5000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": "majority", - "j": true, - "wtimeout": 5000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commit is retried after commitTransaction UnknownTransactionCommitResult (NotWritablePrimary)", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 10107, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - 
"expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commit is not retried after MaxTimeMSExpired error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 50 - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 
1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - }, - "options": { - "maxCommitTimeMS": 60000 - } - }, - "result": { - "errorCodeName": "MaxTimeMSExpired", - "errorLabelsContain": [ - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "maxTimeMS": 60000, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - } - ] -} diff --git a/test/transactions-convenient-api/commit-transienttransactionerror-4.2.json b/test/transactions-convenient-api/commit-transienttransactionerror-4.2.json deleted file mode 100644 index 7663bb54e1..0000000000 --- a/test/transactions-convenient-api/commit-transienttransactionerror-4.2.json +++ /dev/null @@ -1,197 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.1.6", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "withTransaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "transaction is retried after commitTransaction TransientTransactionError (PreparedTransactionInProgress)", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 267, - 
"closeConnection": false - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - 
"$numberLong": "3" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions-convenient-api/commit-transienttransactionerror.json b/test/transactions-convenient-api/commit-transienttransactionerror.json deleted file mode 100644 index 18becbe09c..0000000000 --- a/test/transactions-convenient-api/commit-transienttransactionerror.json +++ /dev/null @@ -1,725 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "withTransaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "transaction is retried after commitTransaction TransientTransactionError (LockTimeout)", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 24, - "closeConnection": false - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - 
"$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - 
"data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "transaction is retried after commitTransaction TransientTransactionError (WriteConflict)", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 112, - "closeConnection": false - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": 
null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "transaction is retried after commitTransaction TransientTransactionError (SnapshotUnavailable)", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 246, - "closeConnection": false - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - 
"commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "transaction is retried after commitTransaction TransientTransactionError (NoSuchTransaction)", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { 
- "failCommands": [ - "commitTransaction" - ], - "errorCode": 251, - "closeConnection": false - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", 
- "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions-convenient-api/commit-writeconcernerror.json b/test/transactions-convenient-api/commit-writeconcernerror.json deleted file mode 100644 index fbad645546..0000000000 --- a/test/transactions-convenient-api/commit-writeconcernerror.json +++ /dev/null @@ -1,602 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "withTransaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "commitTransaction is retried after WriteConcernFailed timeout error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 64, - "codeName": "WriteConcernFailed", - "errmsg": "waiting for replication timed out", - "errInfo": { - "wtimeout": true - } - } - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": 
{ - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction is retried after WriteConcernFailed non-timeout error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 64, - "codeName": "WriteConcernFailed", - "errmsg": "multiple errors reported" - } - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": 
"collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction is not retried after UnknownReplWriteConcern error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 79, - "codeName": "UnknownReplWriteConcern", - "errmsg": "No 
write concern mode named 'foo' found in replica set configuration" - } - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - }, - "result": { - "errorCodeName": "UnknownReplWriteConcern", - "errorLabelsOmit": [ - "TransientTransactionError", - "UnknownTransactionCommitResult" - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction is not retried after UnsatisfiableWriteConcern error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 100, - "codeName": "UnsatisfiableWriteConcern", - "errmsg": "Not enough data-bearing nodes" - } - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - 
"result": { - "insertedId": 1 - } - } - ] - } - }, - "result": { - "errorCodeName": "UnsatisfiableWriteConcern", - "errorLabelsOmit": [ - "TransientTransactionError", - "UnknownTransactionCommitResult" - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction is not retried after MaxTimeMSExpired error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 50, - "codeName": "MaxTimeMSExpired", - "errmsg": "operation exceeded time limit" - } - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - }, - "result": { - "errorCodeName": "MaxTimeMSExpired", - "errorLabelsContain": [ - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } 
- ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions-convenient-api/commit.json b/test/transactions-convenient-api/commit.json deleted file mode 100644 index 0a7451db95..0000000000 --- a/test/transactions-convenient-api/commit.json +++ /dev/null @@ -1,286 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "withTransaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "withTransaction commits after callback returns", - "useMultipleMongoses": true, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - 
"startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "withTransaction commits after callback returns (second transaction)", - "useMultipleMongoses": true, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": 
null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - } - ] -} diff --git a/test/transactions-convenient-api/transaction-options.json b/test/transactions-convenient-api/transaction-options.json deleted file mode 100644 index 6deff43cf4..0000000000 --- a/test/transactions-convenient-api/transaction-options.json +++ /dev/null @@ -1,577 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "withTransaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "withTransaction and no transaction options set", - "useMultipleMongoses": true, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - 
"callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "withTransaction inherits transaction options from client", - "useMultipleMongoses": true, - "clientOptions": { - "readConcernLevel": "local", - "w": 1 - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "local" - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - 
"commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": 1 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "withTransaction inherits transaction options from defaultTransactionOptions", - "useMultipleMongoses": true, - "sessionOptions": { - "session0": { - "defaultTransactionOptions": { - "readConcern": { - "level": "majority" - }, - "writeConcern": { - "w": 1 - } - } - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "majority" - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": 1 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "withTransaction explicit transaction options", - "useMultipleMongoses": true, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - 
"callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - }, - "options": { - "readConcern": { - "level": "majority" - }, - "writeConcern": { - "w": 1 - } - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "majority" - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": 1 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "withTransaction explicit transaction options override defaultTransactionOptions", - "useMultipleMongoses": true, - "sessionOptions": { - "session0": { - "defaultTransactionOptions": { - "readConcern": { - "level": "snapshot" - }, - "writeConcern": { - "w": "majority" - } - } - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - }, - "options": { - "readConcern": { - "level": "majority" - }, - "writeConcern": { - "w": 1 - } - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } 
- ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "majority" - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": 1 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "withTransaction explicit transaction options override client options", - "useMultipleMongoses": true, - "clientOptions": { - "readConcernLevel": "local", - "w": "majority" - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - }, - "options": { - "readConcern": { - "level": "majority" - }, - "writeConcern": { - "w": 1 - } - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "majority" - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": 1 - }, - "readConcern": null, - "startTransaction": null - }, - 
"command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions-convenient-api/unified/callback-aborts.json b/test/transactions-convenient-api/unified/callback-aborts.json new file mode 100644 index 0000000000..206428715c --- /dev/null +++ b/test/transactions-convenient-api/unified/callback-aborts.json @@ -0,0 +1,344 @@ +{ + "description": "callback-aborts", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "withTransaction succeeds if callback aborts", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + 
"$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ] + }, + { + "description": "withTransaction succeeds if callback aborts with no ops", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "abortTransaction", + "object": "session0" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ] + }, + { + "description": "withTransaction still succeeds if callback aborts and runs extra op", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 
2 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "autocommit": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/callback-commits.json b/test/transactions-convenient-api/unified/callback-commits.json new file mode 100644 index 0000000000..06f791e9ae --- /dev/null +++ b/test/transactions-convenient-api/unified/callback-commits.json @@ -0,0 +1,423 @@ +{ + "description": "callback-commits", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + 
"minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "withTransaction succeeds if callback commits", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + 
], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "withTransaction still succeeds if callback commits and runs extra op", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + 
"commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "autocommit": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + } + ] +} diff --git 
a/test/transactions-convenient-api/unified/callback-retry.json b/test/transactions-convenient-api/unified/callback-retry.json new file mode 100644 index 0000000000..277dfa18ed --- /dev/null +++ b/test/transactions-convenient-api/unified/callback-retry.json @@ -0,0 +1,472 @@ +{ + "description": "callback-retry", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client1", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "callback succeeds after multiple connection errors", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "withTransaction", + 
"object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "ignoreResultAndError": true + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, 
+ { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "callback is not retried after non-transient error (DuplicateKeyError)", + "operations": [ + { + "name": "withTransaction", + "object": "session1", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ], + "errorContains": "E11000" + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", 
+ "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/commit-retry-errorLabels.json b/test/transactions-convenient-api/unified/commit-retry-errorLabels.json new file mode 100644 index 0000000000..c6a4e44d62 --- /dev/null +++ b/test/transactions-convenient-api/unified/commit-retry-errorLabels.json @@ -0,0 +1,231 @@ +{ + "description": "commit-retry-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + 
"observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commit is retried after commitTransaction UnknownTransactionCommitResult (NotWritablePrimary)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 10107, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, 
+ "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/commit-retry.json b/test/transactions-convenient-api/unified/commit-retry.json new file mode 100644 index 0000000000..928f0167e4 --- /dev/null +++ b/test/transactions-convenient-api/unified/commit-retry.json @@ -0,0 +1,552 @@ +{ + "description": "commit-retry", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": 
"withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction succeeds after multiple connection errors", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", 
+ "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction retry only overwrites write concern w option", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "writeConcern": { + "w": 2, + "journal": true, + "wtimeoutMS": 5000 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { 
+ "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": 2, + "j": true, + "wtimeout": 5000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "j": true, + "wtimeout": 5000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "j": true, + "wtimeout": 5000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commit is not retried after MaxTimeMSExpired error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + 
"client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 50 + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "maxCommitTimeMS": 60000 + }, + "expectError": { + "errorCodeName": "MaxTimeMSExpired", + "errorLabelsContain": [ + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "maxTimeMS": 60000, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/commit-transienttransactionerror-4.2.json b/test/transactions-convenient-api/unified/commit-transienttransactionerror-4.2.json 
new file mode 100644 index 0000000000..0f5a782452 --- /dev/null +++ b/test/transactions-convenient-api/unified/commit-transienttransactionerror-4.2.json @@ -0,0 +1,294 @@ +{ + "description": "commit-transienttransactionerror-4.2", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.1.6", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "transaction is retried after commitTransaction TransientTransactionError (PreparedTransactionInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 267, + "closeConnection": false + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { 
+ "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + 
}, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/commit-transienttransactionerror.json b/test/transactions-convenient-api/unified/commit-transienttransactionerror.json new file mode 100644 index 0000000000..dd5158d813 --- /dev/null +++ b/test/transactions-convenient-api/unified/commit-transienttransactionerror.json @@ -0,0 +1,996 @@ +{ + "description": "commit-transienttransactionerror", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "transaction is retried after commitTransaction TransientTransactionError (LockTimeout)", + "operations": [ + { 
+ "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 24, + "closeConnection": false + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + 
}, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "transaction is retried after commitTransaction TransientTransactionError (WriteConflict)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 112, + "closeConnection": false + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", 
+ "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + 
"command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "transaction is retried after commitTransaction TransientTransactionError (SnapshotUnavailable)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 246, + "closeConnection": false + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + 
}, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + 
{ + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "transaction is retried after commitTransaction TransientTransactionError (NoSuchTransaction)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 251, + "closeConnection": false + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + 
"autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + 
"collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/commit-writeconcernerror.json b/test/transactions-convenient-api/unified/commit-writeconcernerror.json new file mode 100644 index 0000000000..a6f6e6bd7f --- /dev/null +++ b/test/transactions-convenient-api/unified/commit-writeconcernerror.json @@ -0,0 +1,814 @@ +{ + "description": "commit-writeconcernerror", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction is retried after WriteConcernFailed timeout error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 64, + "codeName": "WriteConcernFailed", + "errmsg": "waiting for replication timed out", + "errInfo": { + "wtimeout": true + } + } + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": 
"session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": 
"withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction is retried after WriteConcernFailed non-timeout error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 64, + "codeName": "WriteConcernFailed", + "errmsg": "multiple errors reported" + } + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + 
"writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction is not retried after UnknownReplWriteConcern error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 79, + "codeName": "UnknownReplWriteConcern", + "errmsg": "No write concern mode named 'foo' found in replica set configuration" + } + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + }, + "expectError": { + "errorCodeName": "UnknownReplWriteConcern", + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 
1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction is not retried after UnsatisfiableWriteConcern error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 100, + "codeName": "UnsatisfiableWriteConcern", + "errmsg": "Not enough data-bearing nodes" + } + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + }, + "expectError": { + "errorCodeName": "UnsatisfiableWriteConcern", + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + 
"command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction is not retried after MaxTimeMSExpired error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 50, + "codeName": "MaxTimeMSExpired", + "errmsg": "operation exceeded time limit" + } + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + }, + "expectError": { + "errorCodeName": "MaxTimeMSExpired", + "errorLabelsContain": [ + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + } + ], + "expectEvents": [ + { + 
"client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/commit.json b/test/transactions-convenient-api/unified/commit.json new file mode 100644 index 0000000000..5684d5ee89 --- /dev/null +++ b/test/transactions-convenient-api/unified/commit.json @@ -0,0 +1,398 @@ +{ + "description": "commit", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + 
"initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "withTransaction commits after callback returns", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + 
"startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "withTransaction commits after callback returns (second transaction)", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": 
"commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/transaction-options.json b/test/transactions-convenient-api/unified/transaction-options.json new file mode 100644 index 0000000000..b1a74c5fd1 --- /dev/null +++ b/test/transactions-convenient-api/unified/transaction-options.json @@ -0,0 +1,819 @@ +{ + "description": "transaction-options", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": 
"database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "withTransaction and no transaction options set", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction inherits transaction options from client", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": true, 
+ "uriOptions": { + "readConcernLevel": "local", + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session1", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction inherits transaction options from defaultTransactionOptions", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": 
{ + "entities": [ + { + "session": { + "id": "session1", + "client": "client0", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": 1 + } + } + } + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session1", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction explicit transaction options", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { 
+ "$$unsetOrMatches": 1 + } + } + } + } + ], + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction explicit transaction options override defaultTransactionOptions", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "session": { + "id": "session1", + "client": "client0", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "snapshot" + }, + "writeConcern": { + "w": "majority" + } + } + } + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session1", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "readConcern": { + "level": "majority" 
+ }, + "writeConcern": { + "w": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction explicit transaction options override client options", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": true, + "uriOptions": { + "readConcernLevel": "local", + "w": "majority" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session1", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "session": "session1", + 
"document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/legacy/abort.json b/test/transactions/legacy/abort.json deleted file mode 100644 index 3729a98298..0000000000 --- a/test/transactions/legacy/abort.json +++ /dev/null @@ -1,621 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "abort", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - 
}, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": { - "afterClusterTime": 42 - }, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "implicit abort", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": 
"collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "two aborts", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - }, - { - "name": "abortTransaction", - "object": "session0", - "result": { - "errorContains": "cannot call abortTransaction twice" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": 
"abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abort without start", - "operations": [ - { - "name": "abortTransaction", - "object": "session0", - "result": { - "errorContains": "no transaction started" - } - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abort directly after no-op commit", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "abortTransaction", - "object": "session0", - "result": { - "errorContains": "Cannot call abortTransaction after calling commitTransaction" - } - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abort directly after commit", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "abortTransaction", - "object": "session0", - "result": { - "errorContains": "Cannot call abortTransaction after calling commitTransaction" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - 
"command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "abort ignores TransactionAborted", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError", - "UnknownTransactionCommitResult" - ], - "errorContains": "E11000" - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "errorCodeName": "NoSuchTransaction", - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - 
"ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abort does not apply writeConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": 10 - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [] - } - } - } - ] -} diff --git a/test/transactions/legacy/bulk.json b/test/transactions/legacy/bulk.json deleted file mode 100644 index 8a9793b8b3..0000000000 --- a/test/transactions/legacy/bulk.json +++ /dev/null @@ -1,531 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "bulk", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "deleteOne", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - 
"_id": 1 - } - }, - "result": { - "deletedCount": 1 - } - }, - { - "name": "bulkWrite", - "object": "collection", - "arguments": { - "session": "session0", - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$set": { - "x": 1 - } - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 2 - }, - "update": { - "$set": { - "x": 2 - } - }, - "upsert": true - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3 - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 4 - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 5 - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 6 - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 7 - } - } - }, - { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "y": 1 - } - } - }, - { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 2 - }, - "replacement": { - "y": 2 - } - } - }, - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 3 - } - } - }, - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 4 - } - } - }, - { - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gte": 2 - } - }, - "update": { - "$set": { - "z": 1 - } - } - } - }, - { - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$gte": 6 - } - } - } - } - ] - }, - "result": { - "deletedCount": 4, - "insertedCount": 6, - "insertedIds": { - "0": 1, - "3": 3, - "4": 4, - "5": 5, - "6": 6, - "7": 7 - }, - "matchedCount": 7, - "modifiedCount": 7, - "upsertedCount": 1, - "upsertedIds": { - "2": 2 - } - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - 
"documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": 1 - }, - "limit": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "delete", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": { - "$set": { - "x": 1 - } - } - }, - { - "q": { - "_id": 2 - }, - "u": { - "$set": { - "x": 2 - } - }, - "upsert": true - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - }, - { - "_id": 5 - }, - { - "_id": 6 - }, - { - "_id": 7 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "update": 
"test", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": { - "y": 1 - } - }, - { - "q": { - "_id": 2 - }, - "u": { - "y": 2 - } - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": 3 - }, - "limit": 1 - }, - { - "q": { - "_id": 4 - }, - "limit": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "delete", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": { - "$gte": 2 - } - }, - "u": { - "$set": { - "z": 1 - } - }, - "multi": true - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$gte": 6 - } - }, - "limit": 0 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "delete", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "y": 1 - }, - { - 
"_id": 2, - "y": 2, - "z": 1 - }, - { - "_id": 5, - "z": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/causal-consistency.json b/test/transactions/legacy/causal-consistency.json deleted file mode 100644 index 0e81bf2ff2..0000000000 --- a/test/transactions/legacy/causal-consistency.json +++ /dev/null @@ -1,305 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 1, - "count": 0 - } - ], - "tests": [ - { - "description": "causal consistency", - "clientOptions": { - "retryWrites": false - }, - "operations": [ - { - "name": "updateOne", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "count": 1 - } - } - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "updateOne", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "count": 1 - } - } - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": { - "$inc": { - "count": 1 - } - } - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": null, - "txnNumber": null, - "startTransaction": null, - "autocommit": null, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": { - "$inc": { - "count": 1 - } - } - } - ], - 
"ordered": true, - "readConcern": { - "afterClusterTime": 42 - }, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "count": 2 - } - ] - } - } - }, - { - "description": "causal consistency disabled", - "clientOptions": { - "retryWrites": false - }, - "sessionOptions": { - "session0": { - "causalConsistency": false - } - }, - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "updateOne", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "count": 1 - } - } - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": null, - "autocommit": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": { - "$inc": { - "count": 1 - } - } - } - ], - "ordered": true, - "readConcern": null, - "lsid": 
"session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "count": 1 - }, - { - "_id": 2 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/commit.json b/test/transactions/legacy/commit.json deleted file mode 100644 index faa39a65f1..0000000000 --- a/test/transactions/legacy/commit.json +++ /dev/null @@ -1,925 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "commit", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - 
"startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "readConcern": { - "afterClusterTime": 42 - }, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "rerun commit after empty transaction", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": 
"session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "multiple commits in a row", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - 
"wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "write concern error on commit", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": 10 - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsOmit": [ - "TransientTransactionError", - "UnknownTransactionCommitResult" - ] - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commit without start", - "operations": [ - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorContains": "no transaction started" - } - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "commit after no-op abort", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "abortTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorContains": "Cannot call commitTransaction after calling abortTransaction" - } - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "commit after abort", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - 
{ - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorContains": "Cannot call commitTransaction after calling abortTransaction" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ] - }, - { - "description": "multiple commits after empty transaction", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - 
"command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": { - "afterClusterTime": 42 - }, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "reset session state commit", - "clientOptions": { - "retryWrites": false - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - 
"name": "commitTransaction", - "object": "session0", - "result": { - "errorContains": "no transaction started" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": null, - "startTransaction": null, - "autocommit": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "reset session state abort", - "clientOptions": { - "retryWrites": false - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "abortTransaction", - "object": "session0", - "result": { - "errorContains": "no transaction started" - } - } - ], - "expectations": [ - { - 
"command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": null, - "startTransaction": null, - "autocommit": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 2 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/count.json b/test/transactions/legacy/count.json deleted file mode 100644 index 169296416a..0000000000 --- a/test/transactions/legacy/count.json +++ /dev/null @@ -1,120 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0.2", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ], - "tests": [ - { - "description": "count", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "count", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": { - "errorCodeName": "OperationNotSupportedInTransaction", - "errorLabelsOmit": [ - "TransientTransactionError", - 
"UnknownTransactionCommitResult" - ] - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "test", - "query": { - "_id": 1 - }, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "count", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/create-collection.json b/test/transactions/legacy/create-collection.json deleted file mode 100644 index 9071c59c41..0000000000 --- a/test/transactions/legacy/create-collection.json +++ /dev/null @@ -1,204 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.4", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "explicitly create collection using create command", - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "test" - } - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "session": "session0", - "collection": "test" - } - }, - { - "name": "assertCollectionNotExists", - "object": "testRunner", - "arguments": { - "database": "transaction-tests", - "collection": "test" - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": 
"assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "transaction-tests", - "collection": "test" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "drop": "test", - "writeConcern": null - }, - "command_name": "drop", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "create": "test", - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "create", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - }, - { - "description": "implicitly create collection using insert", - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "test" - } - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "assertCollectionNotExists", - "object": "testRunner", - "arguments": { - "database": "transaction-tests", - "collection": "test" - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "transaction-tests", - "collection": "test" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "drop": "test", - "writeConcern": null - }, - "command_name": "drop", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - 
} - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - } - ] -} diff --git a/test/transactions/legacy/create-index.json b/test/transactions/legacy/create-index.json deleted file mode 100644 index 2ff09c9288..0000000000 --- a/test/transactions/legacy/create-index.json +++ /dev/null @@ -1,237 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.4", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "create index on a non-existing collection", - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "test" - } - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "createIndex", - "object": "collection", - "arguments": { - "session": "session0", - "name": "t_1", - "keys": { - "x": 1 - } - } - }, - { - "name": "assertIndexNotExists", - "object": "testRunner", - "arguments": { - "database": "transaction-tests", - "collection": "test", - "index": "t_1" - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "assertIndexExists", - "object": "testRunner", - "arguments": { - "database": "transaction-tests", - "collection": "test", - "index": "t_1" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "drop": "test", - "writeConcern": null - }, - "command_name": "drop", - "database_name": "transaction-tests" - } - }, 
- { - "command_started_event": { - "command": { - "createIndexes": "test", - "indexes": [ - { - "name": "t_1", - "key": { - "x": 1 - } - } - ], - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "createIndexes", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - }, - { - "description": "create index on a collection created within the same transaction", - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "test" - } - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "session": "session0", - "collection": "test" - } - }, - { - "name": "createIndex", - "object": "collection", - "arguments": { - "session": "session0", - "name": "t_1", - "keys": { - "x": 1 - } - } - }, - { - "name": "assertIndexNotExists", - "object": "testRunner", - "arguments": { - "database": "transaction-tests", - "collection": "test", - "index": "t_1" - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "assertIndexExists", - "object": "testRunner", - "arguments": { - "database": "transaction-tests", - "collection": "test", - "index": "t_1" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "drop": "test", - "writeConcern": null - }, - "command_name": "drop", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "create": "test", - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - 
"writeConcern": null - }, - "command_name": "create", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "createIndexes": "test", - "indexes": [ - { - "name": "t_1", - "key": { - "x": 1 - } - } - ], - "lsid": "session0", - "writeConcern": null - }, - "command_name": "createIndexes", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - } - ] -} diff --git a/test/transactions/legacy/delete.json b/test/transactions/legacy/delete.json deleted file mode 100644 index 65b8327039..0000000000 --- a/test/transactions/legacy/delete.json +++ /dev/null @@ -1,327 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - }, - { - "_id": 5 - } - ], - "tests": [ - { - "description": "delete", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "deleteOne", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": { - "deletedCount": 1 - } - }, - { - "name": "deleteMany", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": { - "$lte": 3 - } - } - }, - "result": { - "deletedCount": 2 - } - }, - { - "name": "deleteOne", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 4 - } - }, - "result": { - "deletedCount": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - 
{ - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": 1 - }, - "limit": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "delete", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$lte": 3 - } - }, - "limit": 0 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "delete", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": 4 - }, - "limit": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "delete", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 5 - } - ] - } - } - }, - { - "description": "collection writeConcern ignored for delete", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "deleteOne", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": { - "deletedCount": 1 
- } - }, - { - "name": "deleteMany", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": { - "$lte": 3 - } - } - }, - "result": { - "deletedCount": 2 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": 1 - }, - "limit": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "delete", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$lte": 3 - } - }, - "limit": 0 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "delete", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - } - ] -} diff --git a/test/transactions/legacy/error-labels-blockConnection.json b/test/transactions/legacy/error-labels-blockConnection.json deleted file mode 100644 index 56b646f7ad..0000000000 --- a/test/transactions/legacy/error-labels-blockConnection.json +++ /dev/null @@ -1,159 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.2", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "add RetryableWriteError 
and UnknownTransactionCommitResult labels to connection errors", - "clientOptions": { - "socketTimeoutMS": 100 - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "blockConnection": true, - "blockTimeMS": 150 - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - 
"$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/error-labels.json b/test/transactions/legacy/error-labels.json deleted file mode 100644 index 0be19c731c..0000000000 --- a/test/transactions/legacy/error-labels.json +++ /dev/null @@ -1,2086 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ], - "serverless": "forbid" - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "DuplicateKey errors do not contain transient label", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "session": "session0", - "documents": [ - { - "_id": 1 - }, - { - "_id": 1 - } - ] - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError", - "UnknownTransactionCommitResult" - ], - "errorContains": "E11000" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - }, - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - 
"command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "NotWritablePrimary errors contain transient label", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "WriteConflict errors contain transient label", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 112 - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 
- } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "NoSuchTransaction errors contain transient label", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 251 - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - 
"command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "NoSuchTransaction errors on commit contain transient label", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 251 - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "add TransientTransactionError label to connection errors, 
but do not add RetryableWriteError label", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 4 - }, - "data": { - "failCommands": [ - "insert", - "find", - "aggregate", - "distinct" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "session": "session0" - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "session": "session0" - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "_id", - "session": "session0" - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - 
"command_started_event": { - "command": { - "find": "test", - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "cursor": {}, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "test", - "key": "_id", - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "distinct", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to connection errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "RetryableWriteError", - 
"UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to retryable commit errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 11602, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operations": [ - { - "name": "startTransaction", - 
"object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "add RetryableWriteError and 
UnknownTransactionCommitResult labels to writeConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": 
"commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "do not add RetryableWriteError label to writeConcernError ShutdownInProgress that occurs within transaction", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "errorLabelsContain": [], - "errorLabelsOmit": [ - "RetryableWriteError", - "TransientTransactionError", - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } 
- ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "add UnknownTransactionCommitResult label to writeConcernError WriteConcernFailed", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 64, - "errmsg": "multiple errors reported" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "TransientTransactionError" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - 
"command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "add UnknownTransactionCommitResult label to writeConcernError WriteConcernFailed with wtimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 64, - "codeName": "WriteConcernFailed", - "errmsg": "waiting for replication timed out", - "errInfo": { - "wtimeout": true - } - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "TransientTransactionError" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - 
"commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "omit UnknownTransactionCommitResult label from writeConcernError UnsatisfiableWriteConcern", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 100, - "errmsg": "Not enough data-bearing nodes" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsOmit": [ - "RetryableWriteError", - "TransientTransactionError", - "UnknownTransactionCommitResult" - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - 
"collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "omit UnknownTransactionCommitResult label from writeConcernError UnknownReplWriteConcern", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 79, - "errmsg": "No write concern mode named 'blah' found in replica set configuration" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsOmit": [ - "RetryableWriteConcern", - "TransientTransactionError", - "UnknownTransactionCommitResult" - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "do not add UnknownTransactionCommitResult label to MaxTimeMSExpired inside transactions", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - 
"failCommands": [ - "aggregate" - ], - "errorCode": 50 - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "maxTimeMS": 60000, - "session": "session0" - }, - "result": { - "errorLabelsOmit": [ - "RetryableWriteError", - "UnknownTransactionCommitResult", - "TransientTransactionError" - ] - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "cursor": {}, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "maxTimeMS": 60000 - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "add UnknownTransactionCommitResult label to MaxTimeMSExpired", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - 
"failCommands": [ - "commitTransaction" - ], - "errorCode": 50 - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - }, - "maxCommitTimeMS": 60000 - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "TransientTransactionError" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - }, - "maxTimeMS": 60000 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "maxTimeMS": 60000 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "add UnknownTransactionCommitResult label to writeConcernError MaxTimeMSExpired", 
- "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 50, - "errmsg": "operation exceeded time limit" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - }, - "maxCommitTimeMS": 60000 - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "TransientTransactionError" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - }, - "maxTimeMS": 60000 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "maxTimeMS": 60000 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { 
- "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/findOneAndDelete.json b/test/transactions/legacy/findOneAndDelete.json deleted file mode 100644 index d82657a9f5..0000000000 --- a/test/transactions/legacy/findOneAndDelete.json +++ /dev/null @@ -1,221 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ], - "tests": [ - { - "description": "findOneAndDelete", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "findOneAndDelete", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 3 - } - }, - "result": { - "_id": 3 - } - }, - { - "name": "findOneAndDelete", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 4 - } - }, - "result": null - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 3 - }, - "remove": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 4 - }, - "remove": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - 
"lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "collection writeConcern ignored for findOneAndDelete", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "findOneAndDelete", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 3 - } - }, - "result": { - "_id": 3 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 3 - }, - "remove": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - } - ] -} diff --git a/test/transactions/legacy/findOneAndReplace.json b/test/transactions/legacy/findOneAndReplace.json deleted file mode 100644 index 7a54ca3433..0000000000 --- a/test/transactions/legacy/findOneAndReplace.json +++ /dev/null @@ -1,255 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - 
"minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ], - "tests": [ - { - "description": "findOneAndReplace", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "findOneAndReplace", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 3 - }, - "replacement": { - "x": 1 - }, - "returnDocument": "Before" - }, - "result": { - "_id": 3 - } - }, - { - "name": "findOneAndReplace", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 4 - }, - "replacement": { - "x": 1 - }, - "upsert": true, - "returnDocument": "After" - }, - "result": { - "_id": 4, - "x": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 3 - }, - "update": { - "x": 1 - }, - "new": false, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 4 - }, - "update": { - "x": 1 - }, - "new": true, - "upsert": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - 
"command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3, - "x": 1 - }, - { - "_id": 4, - "x": 1 - } - ] - } - } - }, - { - "description": "collection writeConcern ignored for findOneAndReplace", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "findOneAndReplace", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 3 - }, - "replacement": { - "x": 1 - }, - "returnDocument": "Before" - }, - "result": { - "_id": 3 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 3 - }, - "update": { - "x": 1 - }, - "new": false, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - } - ] -} diff --git a/test/transactions/legacy/findOneAndUpdate.json b/test/transactions/legacy/findOneAndUpdate.json deleted file mode 100644 index 7af54ba808..0000000000 --- a/test/transactions/legacy/findOneAndUpdate.json +++ /dev/null @@ -1,413 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - 
"topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ], - "tests": [ - { - "description": "findOneAndUpdate", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 3 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - }, - "result": { - "_id": 3 - } - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 4 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "upsert": true, - "returnDocument": "After" - }, - "result": { - "_id": 4, - "x": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 3 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - }, - "result": { - "_id": 3, - "x": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 3 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - }, - "result": { - "_id": 3, - "x": 2 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 3 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "new": false, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": 
null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 4 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "new": true, - "upsert": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 3 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "new": false, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "afterClusterTime": 42 - }, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 3 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "new": false, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "afterClusterTime": 42 - }, - "writeConcern": null - }, - "command_name": "findAndModify", - 
"database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3, - "x": 2 - }, - { - "_id": 4, - "x": 1 - } - ] - } - } - }, - { - "description": "collection writeConcern ignored for findOneAndUpdate", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 3 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - }, - "result": { - "_id": 3 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 3 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "new": false, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - } - ] -} diff --git a/test/transactions/legacy/insert.json 
b/test/transactions/legacy/insert.json deleted file mode 100644 index f26e7c2a76..0000000000 --- a/test/transactions/legacy/insert.json +++ /dev/null @@ -1,648 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "insert", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 2 - }, - { - "_id": 3 - } - ], - "session": "session0" - }, - "result": { - "insertedIds": { - "0": 2, - "1": 3 - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 4 - } - }, - "result": { - "insertedId": 4 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 5 - } - }, - "result": { - "insertedId": 5 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - }, - { - "_id": 3 - } - ], - 
"ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 4 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 5 - } - ], - "ordered": true, - "readConcern": { - "afterClusterTime": 42 - }, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - }, - { - "_id": 5 - } - ] - } - } - }, - { - "description": "insert with session1", - "operations": [ - { - "name": "startTransaction", - "object": "session1" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session1", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - 
}, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 2 - }, - { - "_id": 3 - } - ], - "session": "session1" - }, - "result": { - "insertedIds": { - "0": 2, - "1": 3 - } - } - }, - { - "name": "commitTransaction", - "object": "session1" - }, - { - "name": "startTransaction", - "object": "session1" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session1", - "document": { - "_id": 4 - } - }, - "result": { - "insertedId": 4 - } - }, - { - "name": "abortTransaction", - "object": "session1" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session1", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - }, - { - "_id": 3 - } - ], - "ordered": true, - "lsid": "session1", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session1", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 4 - } - ], - "ordered": true, - "readConcern": { - "afterClusterTime": 42 - }, - "lsid": "session1", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - 
"database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session1", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ] - } - } - }, - { - "description": "collection writeConcern without transaction", - "clientOptions": { - "retryWrites": false - }, - "operations": [ - { - "name": "insertOne", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": null, - "startTransaction": null, - "autocommit": null, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "collection writeConcern ignored for insert", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "documents": [ - { - "_id": 2 - }, - { - "_id": 3 - } - ], - 
"session": "session0" - }, - "result": { - "insertedIds": { - "0": 2, - "1": 3 - } - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - }, - { - "_id": 3 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/mongos-recovery-token.json b/test/transactions/legacy/mongos-recovery-token.json deleted file mode 100644 index da4e9861d1..0000000000 --- a/test/transactions/legacy/mongos-recovery-token.json +++ /dev/null @@ -1,511 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ], - "serverless": "forbid" - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "commitTransaction explicit retries include recoveryToken", - "useMultipleMongoses": true, - "operations": [ - { - "name": 
"startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null, - "recoveryToken": 42 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "recoveryToken": 42 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "recoveryToken": 42 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction retry succeeds on new mongos", - 
"useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - } - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - }, - "recoveryToken": 42 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "recoveryToken": 42 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, 
- { - "description": "commitTransaction retry fails on new mongos", - "useMultipleMongoses": true, - "clientOptions": { - "heartbeatFrequencyMS": 30000 - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 7 - }, - "data": { - "failCommands": [ - "commitTransaction", - "isMaster", - "hello" - ], - "closeConnection": true - } - } - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "UnknownTransactionCommitResult" - ], - "errorCodeName": "NoSuchTransaction" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null, - "recoveryToken": 42 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "recoveryToken": 42 - }, - "command_name": 
"commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction sends recoveryToken", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "closeConnection": true - } - } - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null, - "recoveryToken": 42 - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null, - "recoveryToken": 42 - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - } - ] -} diff --git a/test/transactions/legacy/read-concern.json 
b/test/transactions/legacy/read-concern.json deleted file mode 100644 index dd9243e2f7..0000000000 --- a/test/transactions/legacy/read-concern.json +++ /dev/null @@ -1,1628 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ], - "tests": [ - { - "description": "only first countDocuments includes readConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readConcern": { - "level": "majority" - } - } - } - }, - { - "name": "countDocuments", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": { - "$gte": 2 - } - } - }, - "result": 3 - }, - { - "name": "countDocuments", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": { - "$gte": 2 - } - } - }, - "result": 3 - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$match": { - "_id": { - "$gte": 2 - } - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ], - "cursor": {}, - "lsid": "session0", - "readConcern": { - "level": "majority" - }, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$match": { - "_id": { - "$gte": 2 - } - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - 
} - } - } - ], - "cursor": {}, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "only first find includes readConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readConcern": { - "level": "majority" - } - } - } - }, - { - "name": "find", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "batchSize": 3 - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "find", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "batchSize": 3 - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": { - "level": "majority" - }, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - 
"$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "only first aggregate includes readConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readConcern": { - "level": "majority" - } - } - } - }, - { - "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "batchSize": 3, - "session": "session0" - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - 
"name": "aggregate", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "batchSize": 3, - "session": "session0" - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "cursor": { - "batchSize": 3 - }, - "lsid": "session0", - "readConcern": { - "level": "majority" - }, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "cursor": { - "batchSize": 3 - }, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - 
"commitTransaction": 1, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "only first distinct includes readConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readConcern": { - "level": "majority" - } - } - } - }, - { - "name": "distinct", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "fieldName": "_id" - }, - "result": [ - 1, - 2, - 3, - 4 - ] - }, - { - "name": "distinct", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "fieldName": "_id" - }, - "result": [ - 1, - 2, - 3, - 4 - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "test", - "key": "_id", - "lsid": "session0", - "readConcern": { - "level": "majority" - }, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "distinct", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "test", - "key": "_id", - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "distinct", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - 
"readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "only first runCommand includes readConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readConcern": { - "level": "majority" - } - } - } - }, - { - "name": "runCommand", - "object": "database", - "command_name": "find", - "arguments": { - "session": "session0", - "command": { - "find": "test" - } - } - }, - { - "name": "runCommand", - "object": "database", - "command_name": "find", - "arguments": { - "session": "session0", - "command": { - "find": "test" - } - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "lsid": "session0", - "readConcern": { - "level": "majority" - }, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - 
"collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "countDocuments ignores collection readConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "countDocuments", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": { - "$gte": 2 - } - } - }, - "result": 3 - }, - { - "name": "countDocuments", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": { - "$gte": 2 - } - } - }, - "result": 3 - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$match": { - "_id": { - "$gte": 2 - } - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ], - "cursor": {}, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$match": { - "_id": { - "$gte": 2 - } - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ], - "cursor": {}, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - 
"command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "find ignores collection readConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "find", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "batchSize": 3 - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "find", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "batchSize": 3 - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - 
"command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "aggregate ignores collection readConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "batchSize": 3, - "session": "session0" - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "batchSize": 3, - "session": "session0" - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "cursor": { - "batchSize": 3 - }, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - 
}, - "startTransaction": true, - "autocommit": false - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "cursor": { - "batchSize": 3 - }, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "distinct ignores collection readConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "distinct", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": 
"session0", - "fieldName": "_id" - }, - "result": [ - 1, - 2, - 3, - 4 - ] - }, - { - "name": "distinct", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "fieldName": "_id" - }, - "result": [ - 1, - 2, - 3, - 4 - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "test", - "key": "_id", - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "distinct", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "test", - "key": "_id", - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "distinct", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "runCommand ignores database readConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "runCommand", - "object": "database", - "databaseOptions": { - "readConcern": { - "level": "majority" - } - }, - "command_name": "find", - "arguments": { - "session": "session0", - "command": { - "find": "test" - } - } - }, - { - "name": "runCommand", - "object": "database", - "command_name": "find", - 
"arguments": { - "session": "session0", - "command": { - "find": "test" - } - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/reads.json b/test/transactions/legacy/reads.json deleted file mode 100644 index 9fc587f482..0000000000 --- a/test/transactions/legacy/reads.json +++ /dev/null @@ -1,543 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ], - "tests": [ - { - "description": "collection readConcern without transaction", - "operations": [ - { - "name": "find", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": 
"majority" - } - }, - "arguments": { - "session": "session0" - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "readConcern": { - "level": "majority" - }, - "lsid": "session0", - "txnNumber": null, - "startTransaction": null, - "autocommit": null - }, - "command_name": "find", - "database_name": "transaction-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "find", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "find", - "object": "collection", - "arguments": { - "session": "session0", - "batchSize": 3 - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "find", - "object": "collection", - "arguments": { - "session": "session0", - "batchSize": 3 - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "batchSize": 3, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "batchSize": 3, - "lsid": "session0", - "txnNumber": { - 
"$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "aggregate", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "batchSize": 3, - "session": "session0" - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "batchSize": 3, - "session": "session0" - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "cursor": { - "batchSize": 3 - }, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false - }, - 
"command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "cursor": { - "batchSize": 3 - }, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "distinct", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "session": "session0", - "fieldName": "_id" - }, - "result": [ - 1, - 2, - 3, - 4 - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "test", - "key": "_id", - "lsid": 
"session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "distinct", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/retryable-abort-errorLabels.json b/test/transactions/legacy/retryable-abort-errorLabels.json deleted file mode 100644 index 1110ce2c32..0000000000 --- a/test/transactions/legacy/retryable-abort-errorLabels.json +++ /dev/null @@ -1,204 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.1", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "abortTransaction only retries once with RetryableWriteError from server", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 112, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - 
"lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction does not retry without RetryableWriteError label", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 11600, - "errorLabels": [] - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - 
"startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - } - ] -} diff --git a/test/transactions/legacy/retryable-abort.json b/test/transactions/legacy/retryable-abort.json deleted file mode 100644 index 13cc7c88fb..0000000000 --- a/test/transactions/legacy/retryable-abort.json +++ /dev/null @@ -1,2017 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "abortTransaction only performs a single retry", - "clientOptions": { - "retryWrites": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - 
"database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction does not retry after Interrupted", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 11601, - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction does not retry after WriteConcernError Interrupted", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "writeConcernError": { - "code": 
11601, - "errmsg": "operation was interrupted" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after connection error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - 
"txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 10107, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - 
"$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 13436, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - 
"startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 13435, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after InterruptedDueToReplStateChange", - 
"failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 11602, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 11600, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": 
"session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 189, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { 
- "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 91, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, 
- { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 7, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - 
"command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 6, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - 
"description": "abortTransaction succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 9001, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 89, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - 
"operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after WriteConcernError InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 11600, - "errmsg": "Replication is being shut down" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - 
"document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after WriteConcernError InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 11602, - "errmsg": "Replication is being shut down" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": 
"session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after WriteConcernError PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 189, - "errmsg": "Replication is being shut down" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - 
} - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, 
- "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - } - ] -} diff --git a/test/transactions/legacy/retryable-commit-errorLabels.json b/test/transactions/legacy/retryable-commit-errorLabels.json deleted file mode 100644 index e0818f237b..0000000000 --- a/test/transactions/legacy/retryable-commit-errorLabels.json +++ /dev/null @@ -1,223 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.1", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "commitTransaction does not retry error without RetryableWriteError label", - "clientOptions": { - "retryWrites": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 11600, - "errorLabels": [] - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsOmit": [ - 
"RetryableWriteError", - "TransientTransactionError" - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "commitTransaction retries once with RetryableWriteError from server", - "clientOptions": { - "retryWrites": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 112, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": 
{ - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/retryable-commit.json b/test/transactions/legacy/retryable-commit.json deleted file mode 100644 index 49148c62d2..0000000000 --- a/test/transactions/legacy/retryable-commit.json +++ /dev/null @@ -1,2336 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "commitTransaction fails after two errors", - "clientOptions": { - "retryWrites": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - 
"command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction applies majority write concern on retries", - "clientOptions": { - "retryWrites": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": 2, - "j": true, - "wtimeout": 5000 - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": 
"session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": 2, - "j": true, - "wtimeout": 5000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "j": true, - "wtimeout": 5000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "j": true, - "wtimeout": 5000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction fails after Interrupted", - "failPoint": { - "configureFailPoint": 
"failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 11601, - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorCodeName": "Interrupted", - "errorLabelsOmit": [ - "RetryableWriteError", - "TransientTransactionError", - "UnknownTransactionCommitResult" - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "commitTransaction is not retried after UnsatisfiableWriteConcern error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 100, - "errmsg": "Not enough data-bearing nodes" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - 
"document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsOmit": [ - "RetryableWriteError", - "TransientTransactionError", - "UnknownTransactionCommitResult" - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after connection error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - 
"database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 10107, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - 
"writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 13436, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - 
"startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 13435, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - 
"_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 11602, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - 
"commitTransaction" - ], - "errorCode": 11600, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 189, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": 
"collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 91, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - 
"command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 7, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - 
"command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 6, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": 
false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 9001, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - 
"startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 89, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - 
} - ] - } - } - }, - { - "description": "commitTransaction succeeds after WriteConcernError InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 11600, - "errmsg": "Replication is being shut down" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after 
WriteConcernError InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 11602, - "errmsg": "Replication is being shut down" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after WriteConcernError PrimarySteppedDown", - "failPoint": { - 
"configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 189, - "errmsg": "Replication is being shut down" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - 
"failCommands": [ - "commitTransaction" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/retryable-writes.json b/test/transactions/legacy/retryable-writes.json deleted file mode 100644 index c932893b5b..0000000000 --- a/test/transactions/legacy/retryable-writes.json +++ /dev/null @@ -1,343 +0,0 @@ -{ - "runOn": [ - { - 
"minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "increment txnNumber", - "clientOptions": { - "retryWrites": true - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "abortTransaction", - "object": "session0" - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 4 - }, - { - "_id": 5 - } - ], - "session": "session0" - }, - "result": { - "insertedIds": { - "0": 4, - "1": 5 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - 
"database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 3 - } - ], - "ordered": true, - "readConcern": { - "afterClusterTime": 42 - }, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 4 - }, - { - "_id": 5 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "4" - }, - "startTransaction": null, - "autocommit": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 4 - }, - { - "_id": 5 - } - ] - } - } - }, - { - "description": "writes are not retried", - "clientOptions": { - "retryWrites": true - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - 
"object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - } - ] -} diff --git a/test/transactions/legacy/run-command.json b/test/transactions/legacy/run-command.json deleted file mode 100644 index 2f2a3a8815..0000000000 --- a/test/transactions/legacy/run-command.json +++ /dev/null @@ -1,306 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "run command with default read preference", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "runCommand", - "object": "database", - "command_name": "insert", - "arguments": { - "session": "session0", - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ] - } - }, - "result": { - "n": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { 
- "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - }, - { - "description": "run command with secondary read preference in client option and primary read preference in transaction options", - "clientOptions": { - "readPreference": "secondary" - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readPreference": { - "mode": "Primary" - } - } - } - }, - { - "name": "runCommand", - "object": "database", - "command_name": "insert", - "arguments": { - "session": "session0", - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ] - } - }, - "result": { - "n": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - }, - { - "description": "run command with 
explicit primary read preference", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "runCommand", - "object": "database", - "command_name": "insert", - "arguments": { - "session": "session0", - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ] - }, - "readPreference": { - "mode": "Primary" - } - }, - "result": { - "n": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - }, - { - "description": "run command fails with explicit secondary read preference", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "runCommand", - "object": "database", - "command_name": "find", - "arguments": { - "session": "session0", - "command": { - "find": "test" - }, - "readPreference": { - "mode": "Secondary" - } - }, - "result": { - "errorContains": "read preference in a transaction must be primary" - } - } - ] - }, - { - "description": "run command fails with secondary read preference from transaction options", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readPreference": { - "mode": "Secondary" - } - } - } - }, - { - "name": "runCommand", - "object": "database", - "command_name": "find", - "arguments": { - "session": "session0", 
- "command": { - "find": "test" - } - }, - "result": { - "errorContains": "read preference in a transaction must be primary" - } - } - ] - } - ] -} diff --git a/test/transactions/legacy/transaction-options-repl.json b/test/transactions/legacy/transaction-options-repl.json deleted file mode 100644 index 33324debb8..0000000000 --- a/test/transactions/legacy/transaction-options-repl.json +++ /dev/null @@ -1,181 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "readConcern snapshot in startTransaction options", - "sessionOptions": { - "session0": { - "defaultTransactionOptions": { - "readConcern": { - "level": "majority" - } - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readConcern": { - "level": "snapshot" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readConcern": { - "level": "snapshot" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "snapshot" - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": 
"transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "snapshot", - "afterClusterTime": 42 - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/transaction-options.json b/test/transactions/legacy/transaction-options.json deleted file mode 100644 index 25d245dca5..0000000000 --- a/test/transactions/legacy/transaction-options.json +++ /dev/null @@ -1,1404 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "no transaction options set", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - 
"name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null, - "maxTimeMS": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null, - "maxTimeMS": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "afterClusterTime": 42 - }, - "writeConcern": null, - "maxTimeMS": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null, - "maxTimeMS": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "transaction options inherited 
from client", - "clientOptions": { - "w": 1, - "readConcernLevel": "local" - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "local" - }, - "writeConcern": null, - "maxTimeMS": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": 1 - }, - "maxTimeMS": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "local", - "afterClusterTime": 42 - }, - "writeConcern": null, - "maxTimeMS": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": 
"session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": 1 - }, - "maxTimeMS": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "transaction options inherited from defaultTransactionOptions", - "sessionOptions": { - "session0": { - "defaultTransactionOptions": { - "readConcern": { - "level": "majority" - }, - "writeConcern": { - "w": 1 - }, - "maxCommitTimeMS": 60000 - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "majority" - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": 1 - }, - "maxTimeMS": 60000 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, 
- { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "majority", - "afterClusterTime": 42 - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": 1 - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "startTransaction options override defaults", - "clientOptions": { - "readConcernLevel": "local", - "w": 1 - }, - "sessionOptions": { - "session0": { - "defaultTransactionOptions": { - "readConcern": { - "level": "snapshot" - }, - "writeConcern": { - "w": 1 - }, - "maxCommitTimeMS": 30000 - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readConcern": { - "level": "majority" - }, - "writeConcern": { - "w": "majority" - }, - "maxCommitTimeMS": 60000 - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readConcern": { - "level": "majority" - }, - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - 
"name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "majority" - }, - "writeConcern": null, - "maxTimeMS": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": "majority" - }, - "maxTimeMS": 60000 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "majority", - "afterClusterTime": 42 - }, - "writeConcern": null, - "maxTimeMS": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": "majority" - }, - "maxTimeMS": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "defaultTransactionOptions override client options", - "clientOptions": { - "readConcernLevel": "local", - "w": 1 - }, - "sessionOptions": { - "session0": { - "defaultTransactionOptions": { - "readConcern": { - "level": "majority" - }, - 
"writeConcern": { - "w": "majority" - }, - "maxCommitTimeMS": 60000 - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "majority" - }, - "writeConcern": null, - "maxTimeMS": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": "majority" - }, - "maxTimeMS": 60000 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "majority", - "afterClusterTime": 42 - }, - "writeConcern": null, - "maxTimeMS": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 
1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": "majority" - }, - "maxTimeMS": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "readConcern local in defaultTransactionOptions", - "clientOptions": { - "w": 1 - }, - "sessionOptions": { - "session0": { - "defaultTransactionOptions": { - "readConcern": { - "level": "local" - } - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "local" - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": 1 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": 
{ - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "local", - "afterClusterTime": 42 - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": 1 - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "client writeConcern ignored for bulk", - "clientOptions": { - "w": "majority" - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": 1 - } - } - } - }, - { - "name": "bulkWrite", - "object": "collection", - "arguments": { - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1 - } - } - } - ], - "session": "session0" - }, - "result": { - "deletedCount": 0, - "insertedCount": 1, - "insertedIds": { - "0": 1 - }, - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 0, - "upsertedIds": {} - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - 
"$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": 1 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "readPreference inherited from client", - "clientOptions": { - "readPreference": "secondary" - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": { - "errorContains": "read preference in a transaction must be primary" - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "readPreference inherited from defaultTransactionOptions", - "clientOptions": { - "readPreference": "primary" - }, - "sessionOptions": { - "session0": { - "defaultTransactionOptions": { - "readPreference": { - "mode": "Secondary" - } - } - } - }, - 
"operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": { - "errorContains": "read preference in a transaction must be primary" - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "startTransaction overrides readPreference", - "clientOptions": { - "readPreference": "primary" - }, - "sessionOptions": { - "session0": { - "defaultTransactionOptions": { - "readPreference": { - "mode": "Primary" - } - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readPreference": { - "mode": "Secondary" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "session": 
"session0", - "filter": { - "_id": 1 - } - }, - "result": { - "errorContains": "read preference in a transaction must be primary" - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/update.json b/test/transactions/legacy/update.json deleted file mode 100644 index e33bf5b810..0000000000 --- a/test/transactions/legacy/update.json +++ /dev/null @@ -1,442 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ], - "tests": [ - { - "description": "update", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "updateOne", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 4 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "upsert": true - }, - "result": { - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 1, - "upsertedId": 4 - } - }, - { - "name": "replaceOne", - "object": 
"collection", - "arguments": { - "session": "session0", - "filter": { - "x": 1 - }, - "replacement": { - "y": 1 - } - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - }, - { - "name": "updateMany", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": { - "$gte": 3 - } - }, - "update": { - "$set": { - "z": 1 - } - } - }, - "result": { - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 4 - }, - "u": { - "$inc": { - "x": 1 - } - }, - "upsert": true - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "x": 1 - }, - "u": { - "y": 1 - } - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": { - "$gte": 3 - } - }, - "u": { - "$set": { - "z": 1 - } - }, - "multi": true - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - 
"autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3, - "z": 1 - }, - { - "_id": 4, - "y": 1, - "z": 1 - } - ] - } - } - }, - { - "description": "collections writeConcern ignored for update", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "updateOne", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 4 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "upsert": true - }, - "result": { - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 1, - "upsertedId": 4 - } - }, - { - "name": "replaceOne", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "x": 1 - }, - "replacement": { - "y": 1 - } - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - }, - { - "name": "updateMany", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": { - "$gte": 3 - } - }, - "update": { - "$set": { - "z": 1 - } - } - }, - "result": { - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 4 - }, - "u": { - "$inc": { - "x": 1 - } - }, - "upsert": true - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - 
"writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "x": 1 - }, - "u": { - "y": 1 - } - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": { - "$gte": 3 - } - }, - "u": { - "$set": { - "z": 1 - } - }, - "multi": true - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - } - ] -} diff --git a/test/transactions/legacy/write-concern.json b/test/transactions/legacy/write-concern.json deleted file mode 100644 index 84b1ea3650..0000000000 --- a/test/transactions/legacy/write-concern.json +++ /dev/null @@ -1,1278 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 0 - } - ], - "tests": [ - { - "description": "commit with majority", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - 
"object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0 - }, - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commit with default", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - 
"command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0 - }, - { - "_id": 1 - } - ] - } - } - }, - { - "description": "abort with majority", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0 - } - ] - } - } - }, - { - "description": "abort with default", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - 
"startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0 - } - ] - } - } - }, - { - "description": "start with unacknowledged write concern", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": 0 - } - } - }, - "result": { - "errorContains": "transactions do not support unacknowledged write concern" - } - } - ] - }, - { - "description": "start with implicit unacknowledged write concern", - "clientOptions": { - "w": 0 - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "result": { - "errorContains": "transactions do not support unacknowledged write concern" - } - } - ] - }, - { - "description": "unacknowledged write concern coll insertOne", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" 
- } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0 - }, - { - "_id": 1 - } - ] - } - } - }, - { - "description": "unacknowledged write concern coll insertMany", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertMany", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": { - "session": "session0", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "result": { - "insertedIds": { - "0": 1, - "1": 2 - } - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0 - }, - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unacknowledged write concern coll bulkWrite", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "bulkWrite", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } 
- }, - "arguments": { - "session": "session0", - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1 - } - } - } - ] - }, - "result": { - "deletedCount": 0, - "insertedCount": 1, - "insertedIds": { - "0": 1 - }, - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 0, - "upsertedIds": {} - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0 - }, - { - "_id": 1 - } - ] - } - } - }, - { - "description": "unacknowledged write concern coll deleteOne", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "deleteOne", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 0 - } - }, - "result": { - "deletedCount": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": 0 - }, - "limit": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - 
"command_name": "delete", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "unacknowledged write concern coll deleteMany", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "deleteMany", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 0 - } - }, - "result": { - "deletedCount": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": 0 - }, - "limit": 0 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "delete", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "unacknowledged write concern coll updateOne", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "updateOne", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 0 - }, - 
"update": { - "$inc": { - "x": 1 - } - }, - "upsert": true - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 0 - }, - "u": { - "$inc": { - "x": 1 - } - }, - "upsert": true - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0, - "x": 1 - } - ] - } - } - }, - { - "description": "unacknowledged write concern coll updateMany", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "updateMany", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 0 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "upsert": true - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 0 - }, - "u": { - "$inc": { - "x": 1 - } - }, - "multi": true, - "upsert": true - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - 
"writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0, - "x": 1 - } - ] - } - } - }, - { - "description": "unacknowledged write concern coll findOneAndDelete", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "findOneAndDelete", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 0 - } - }, - "result": { - "_id": 0 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 0 - }, - "remove": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "unacknowledged write concern coll findOneAndReplace", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "findOneAndReplace", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": { - 
"session": "session0", - "filter": { - "_id": 0 - }, - "replacement": { - "x": 1 - }, - "returnDocument": "Before" - }, - "result": { - "_id": 0 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 0 - }, - "update": { - "x": 1 - }, - "new": false, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0, - "x": 1 - } - ] - } - } - }, - { - "description": "unacknowledged write concern coll findOneAndUpdate", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 0 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - }, - "result": { - "_id": 0 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 0 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "new": false, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - 
"command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0, - "x": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions/unified/abort.json b/test/transactions/unified/abort.json new file mode 100644 index 0000000000..c151a7d0c6 --- /dev/null +++ b/test/transactions/unified/abort.json @@ -0,0 +1,828 @@ +{ + "description": "abort", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "abort", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + 
"document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { 
+ "description": "implicit abort", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "endSession" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "two aborts", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + }, + { + "object": "session0", + "name": "abortTransaction", + "expectError": { + "errorContains": "cannot call abortTransaction twice" + } + } + ], + "expectEvents": [ + { + "client": "client0", + 
"events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abort without start", + "operations": [ + { + "object": "session0", + "name": "abortTransaction", + "expectError": { + "errorContains": "no transaction started" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abort directly after no-op commit", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "abortTransaction", + "expectError": { + "errorContains": "Cannot call abortTransaction after calling commitTransaction" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abort directly after commit", + "operations": [ + { + "object": "session0", + "name": 
"startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "abortTransaction", + "expectError": { + "errorContains": "Cannot call abortTransaction after calling commitTransaction" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "abort ignores TransactionAborted", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, 
+ "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ], + "errorContains": "E11000" + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorCodeName": "NoSuchTransaction", + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + 
"abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abort does not apply writeConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 10 + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/transactions/unified/bulk.json b/test/transactions/unified/bulk.json new file mode 100644 index 0000000000..ece162518f --- /dev/null +++ b/test/transactions/unified/bulk.json @@ -0,0 +1,652 @@ +{ + "description": "bulk", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": 
"test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "bulk", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "session": "session0", + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": 2 + } + }, + "upsert": true + } + }, + { + "insertOne": { + "document": { + "_id": 3 + } + } + }, + { + "insertOne": { + "document": { + "_id": 4 + } + } + }, + { + "insertOne": { + "document": { + "_id": 5 + } + } + }, + { + "insertOne": { + "document": { + "_id": 6 + } + } + }, + { + "insertOne": { + "document": { + "_id": 7 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "y": 1 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 2 + }, + "replacement": { + "y": 2 + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 3 + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 4 + } + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$gte": 2 + } + }, + "update": { + "$set": { + "z": 1 + } + } + } + }, + { + "deleteMany": { + "filter": { + "_id": { + "$gte": 6 + } + } + } + } + ] + }, + "expectResult": { + "deletedCount": 4, + "insertedCount": 6, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1, + "3": 3, + "4": 4, + "5": 5, + "6": 6, + "7": 7 + } + }, + 
"matchedCount": 7, + "modifiedCount": 7, + "upsertedCount": 1, + "upsertedIds": { + "2": 2 + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": 2 + }, + "u": { + "$set": { + "x": 2 + } + }, + "upsert": true, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": 
"1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + }, + { + "_id": 6 + }, + { + "_id": 7 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "y": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": 2 + }, + "u": { + "y": 2 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 3 + }, + "limit": 1 + }, + { + "q": { + "_id": 4 + }, + "limit": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gte": 2 + } + }, + "u": { 
+ "$set": { + "z": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$gte": 6 + } + }, + "limit": 0 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "y": 1 + }, + { + "_id": 2, + "y": 2, + "z": 1 + }, + { + "_id": 5, + "z": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/causal-consistency.json b/test/transactions/unified/causal-consistency.json new file mode 100644 index 0000000000..52a6cb8180 --- /dev/null +++ b/test/transactions/unified/causal-consistency.json @@ -0,0 +1,426 @@ +{ + "description": "causal-consistency", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + 
"client": { + "id": "client0", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session_no_cc", + "client": "client0", + "sessionOptions": { + "causalConsistency": false + } + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "count": 0 + } + ] + } + ], + "tests": [ + { + "description": "causal consistency", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "count": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "count": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "count": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false 
+ }, + "autocommit": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "count": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "count": 2 + } + ] + } + ] + }, + { + "description": "causal consistency disabled", + "operations": [ + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session_no_cc", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session_no_cc", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "session": "session_no_cc", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "count": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "object": "session_no_cc", + "name": 
"commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session_no_cc" + }, + "txnNumber": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "count": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session_no_cc" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session_no_cc" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "count": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/commit.json b/test/transactions/unified/commit.json new file mode 100644 index 0000000000..ab778d8df2 --- /dev/null +++ b/test/transactions/unified/commit.json @@ -0,0 +1,1234 @@ +{ + "description": "commit", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + 
"topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commit", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + 
"events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "rerun commit after empty transaction", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + 
"object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "multiple commits in a row", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": 
[ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "write concern error on commit", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 10 + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + 
"session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commit without start", + "operations": [ + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorContains": "no transaction started" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "commit after no-op abort", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "session0", + "name": "abortTransaction" + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorContains": "Cannot call commitTransaction after calling abortTransaction" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "commit after abort", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorContains": "Cannot call commitTransaction after calling 
abortTransaction" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "multiple commits after empty transaction", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + 
"insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "reset session state commit", + "operations": [ + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + 
"insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction", + "expectError": { + "errorContains": "no transaction started" + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "reset session state abort", + 
"operations": [ + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction", + "expectError": { + "errorContains": "no transaction started" + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": 
"transaction-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/count.json b/test/transactions/unified/count.json new file mode 100644 index 0000000000..404b06beb6 --- /dev/null +++ b/test/transactions/unified/count.json @@ -0,0 +1,177 @@ +{ + "description": "count", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0.2", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ], + "tests": [ + { + "description": "count", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorCodeName": "OperationNotSupportedInTransaction", + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "test", + "query": { + "_id": 1 + }, + "readConcern": { + "$$exists": false + }, + "lsid": { 
+ "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "count", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/create-collection.json b/test/transactions/unified/create-collection.json new file mode 100644 index 0000000000..e190088b3b --- /dev/null +++ b/test/transactions/unified/create-collection.json @@ -0,0 +1,282 @@ +{ + "description": "create-collection", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "explicitly create collection using create command", + "operations": [ + { + "object": "database0", + "name": "dropCollection", + 
"arguments": { + "collection": "test" + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "database0", + "name": "createCollection", + "arguments": { + "session": "session0", + "collection": "test" + } + }, + { + "object": "testRunner", + "name": "assertCollectionNotExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test" + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "testRunner", + "name": "assertCollectionExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test", + "writeConcern": { + "$$exists": false + } + }, + "commandName": "drop", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "create", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "implicitly create collection using insert", + "operations": [ + { + "object": "database0", + "name": "dropCollection", + "arguments": { + "collection": "test" + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + 
"$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "testRunner", + "name": "assertCollectionNotExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test" + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "testRunner", + "name": "assertCollectionExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test", + "writeConcern": { + "$$exists": false + } + }, + "commandName": "drop", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/create-index.json b/test/transactions/unified/create-index.json new file mode 100644 index 0000000000..98d6e11547 --- /dev/null +++ b/test/transactions/unified/create-index.json @@ -0,0 +1,313 @@ +{ + "description": "create-index", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { 
+ "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "create index on a non-existing collection", + "operations": [ + { + "object": "database0", + "name": "dropCollection", + "arguments": { + "collection": "test" + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "createIndex", + "arguments": { + "session": "session0", + "name": "t_1", + "keys": { + "x": 1 + } + } + }, + { + "object": "testRunner", + "name": "assertIndexNotExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test", + "indexName": "t_1" + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "testRunner", + "name": "assertIndexExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test", + "indexName": "t_1" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test", + "writeConcern": { + "$$exists": false + } + }, + "commandName": "drop", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "test", + "indexes": [ + { + "name": "t_1", + "key": { + "x": 1 + } + } + ], + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "createIndexes", + "databaseName": 
"transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "create index on a collection created within the same transaction", + "operations": [ + { + "object": "database0", + "name": "dropCollection", + "arguments": { + "collection": "test" + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "database0", + "name": "createCollection", + "arguments": { + "session": "session0", + "collection": "test" + } + }, + { + "object": "collection0", + "name": "createIndex", + "arguments": { + "session": "session0", + "name": "t_1", + "keys": { + "x": 1 + } + } + }, + { + "object": "testRunner", + "name": "assertIndexNotExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test", + "indexName": "t_1" + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "testRunner", + "name": "assertIndexExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test", + "indexName": "t_1" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test", + "writeConcern": { + "$$exists": false + } + }, + "commandName": "drop", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "create", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { 
+ "createIndexes": "test", + "indexes": [ + { + "name": "t_1", + "key": { + "x": 1 + } + } + ], + "lsid": { + "$$sessionLsid": "session0" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "createIndexes", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/delete.json b/test/transactions/unified/delete.json new file mode 100644 index 0000000000..4c1cae0a4e --- /dev/null +++ b/test/transactions/unified/delete.json @@ -0,0 +1,425 @@ +{ + "description": "delete", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + ], + "tests": [ + { + "description": "delete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "session": 
"session0", + "filter": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$lte": 3 + } + } + }, + "expectResult": { + "deletedCount": 2 + } + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 4 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$lte": 3 + } + }, + "limit": 0 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 4 + }, + "limit": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + 
"commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 5 + } + ] + } + ] + }, + { + "description": "collection writeConcern ignored for delete", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection_wc_majority", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection_wc_majority", + "name": "deleteOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "object": "collection_wc_majority", + "name": "deleteMany", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$lte": 3 + } + } + }, + "expectResult": { + "deletedCount": 2 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + 
"delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$lte": 3 + } + }, + "limit": 0 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/error-labels-blockConnection.json b/test/transactions/unified/error-labels-blockConnection.json new file mode 100644 index 0000000000..8da04d1005 --- /dev/null +++ b/test/transactions/unified/error-labels-blockConnection.json @@ -0,0 +1,235 @@ +{ + "description": "error-labels-blockConnection", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "uriOptions": { + "socketTimeoutMS": 100 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to connection 
errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 150 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + 
"autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/error-labels-errorLabels.json b/test/transactions/unified/error-labels-errorLabels.json new file mode 100644 index 0000000000..1f95ad3419 --- /dev/null +++ b/test/transactions/unified/error-labels-errorLabels.json @@ -0,0 +1,423 @@ +{ + "description": "error-labels-errorLabels", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "serverless": "forbid", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to retryable commit errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": 
"client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 11602, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + 
"commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to writeConcernError ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + 
"readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/error-labels.json b/test/transactions/unified/error-labels.json new file mode 100644 index 0000000000..be8df10ed3 --- /dev/null +++ b/test/transactions/unified/error-labels.json @@ -0,0 +1,2264 @@ +{ + "description": "error-labels", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", 
+ "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "DuplicateKey errors do not contain transient label", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "session": "session0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 1 + } + ] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ], + "errorContains": "E11000" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + }, + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + 
"commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "NotWritablePrimary errors contain transient label", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": 
"WriteConflict errors contain transient label", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 112 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "NoSuchTransaction errors contain transient label", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + 
"times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 251 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "NoSuchTransaction errors on commit contain transient label", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 251 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": 
"insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "add TransientTransactionError label to connection errors, but do not add RetryableWriteError label", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "insert", + "find", + "aggregate", + "distinct" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + 
"session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "session": "session0" + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "session": "session0" + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": 
false + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": {}, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "_id", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "distinct", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to connection errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + 
"$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + 
"outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "do not add RetryableWriteError label to writeConcernError ShutdownInProgress that occurs within transaction", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": 
"add UnknownTransactionCommitResult label to writeConcernError WriteConcernFailed", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 64, + "errmsg": "multiple errors reported" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + 
"commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "add UnknownTransactionCommitResult label to writeConcernError WriteConcernFailed with wtimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 64, + "codeName": "WriteConcernFailed", + "errmsg": "waiting for replication timed out", + "errInfo": { + "wtimeout": true + } + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + 
"$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "omit UnknownTransactionCommitResult label from writeConcernError UnsatisfiableWriteConcern", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 100, + "errmsg": "Not enough data-bearing nodes" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + 
"errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "omit UnknownTransactionCommitResult label from writeConcernError UnknownReplWriteConcern", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 79, + "errmsg": "No write concern mode named 'blah' found in replica set configuration" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + 
}, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteConcern", + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "do not add UnknownTransactionCommitResult label to MaxTimeMSExpired inside transactions", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 50 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + 
"$project": { + "_id": 1 + } + } + ], + "maxTimeMS": 60000, + "session": "session0" + }, + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult", + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": {}, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "maxTimeMS": 60000 + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "add UnknownTransactionCommitResult label to MaxTimeMSExpired", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + 
"commitTransaction" + ], + "errorCode": 50 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + }, + "maxCommitTimeMS": 60000 + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + }, + "maxTimeMS": 60000 + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "maxTimeMS": 60000 + }, + "commandName": "commitTransaction", + 
"databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "add UnknownTransactionCommitResult label to writeConcernError MaxTimeMSExpired", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 50, + "errmsg": "operation exceeded time limit" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + }, + "maxCommitTimeMS": 60000 + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + 
"startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + }, + "maxTimeMS": 60000 + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "maxTimeMS": 60000 + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/errors-client.json b/test/transactions/unified/errors-client.json new file mode 100644 index 0000000000..00f1497c2d --- /dev/null +++ b/test/transactions/unified/errors-client.json @@ -0,0 +1,142 @@ +{ + "description": "errors-client", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Client side error in command starting transaction", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + 
"object": "collection0", + "name": "updateOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "x": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "object": "testRunner", + "name": "assertSessionTransactionState", + "arguments": { + "session": "session0", + "state": "starting" + } + } + ] + }, + { + "description": "Client side error when transaction is in progress", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "x": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "object": "testRunner", + "name": "assertSessionTransactionState", + "arguments": { + "session": "session0", + "state": "in_progress" + } + } + ] + } + ] +} diff --git a/test/transactions/legacy/errors.json b/test/transactions/unified/errors.json similarity index 52% rename from test/transactions/legacy/errors.json rename to test/transactions/unified/errors.json index 5fc4905e8c..94a9cac207 100644 --- a/test/transactions/legacy/errors.json +++ b/test/transactions/unified/errors.json @@ -1,52 +1,101 @@ { - "runOn": [ + "description": "errors", + "schemaVersion": "1.3", + "runOnRequirements": [ { "minServerVersion": "4.0", - "topology": [ + "topologies": [ "replicaset" ] }, { "minServerVersion": "4.1.8", - "topology": [ - "sharded" + "topologies": [ + "sharded", + "load-balanced" ] } ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { 
+ "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], "tests": [ { "description": "start insert start", "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 1 } }, - "result": { - "insertedId": 1 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } } }, { - "name": "startTransaction", "object": "session0", - "result": { + "name": "startTransaction", + "expectError": { + "isClientError": true, "errorContains": "transaction already in progress" } }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" } ] }, @@ -54,13 +103,14 @@ "description": "start twice", "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { - "name": "startTransaction", "object": "session0", - "result": { + "name": "startTransaction", + "expectError": { + "isClientError": true, "errorContains": "transaction already in progress" } } @@ -70,34 +120,39 @@ "description": "commit and start twice", "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 1 } }, - "result": { - "insertedId": 1 + "expectResult": { + 
"$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } } }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" }, { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { - "name": "startTransaction", "object": "session0", - "result": { + "name": "startTransaction", + "expectError": { + "isClientError": true, "errorContains": "transaction already in progress" } } @@ -107,36 +162,40 @@ "description": "write conflict commit", "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 1 } }, - "result": { - "insertedId": 1 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } } }, { - "name": "startTransaction", - "object": "session1" + "object": "session1", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session1", "document": { "_id": 1 } }, - "result": { + "expectError": { "errorCodeName": "WriteConflict", "errorLabelsContain": [ "TransientTransactionError" @@ -147,13 +206,13 @@ } }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" }, { - "name": "commitTransaction", "object": "session1", - "result": { + "name": "commitTransaction", + "expectError": { "errorCodeName": "NoSuchTransaction", "errorLabelsContain": [ "TransientTransactionError" @@ -169,36 +228,40 @@ "description": "write conflict abort", "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { 
"_id": 1 } }, - "result": { - "insertedId": 1 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } } }, { - "name": "startTransaction", - "object": "session1" + "object": "session1", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session1", "document": { "_id": 1 } }, - "result": { + "expectError": { "errorCodeName": "WriteConflict", "errorLabelsContain": [ "TransientTransactionError" @@ -209,12 +272,12 @@ } }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" }, { - "name": "abortTransaction", - "object": "session1" + "object": "session1", + "name": "abortTransaction" } ] } diff --git a/test/transactions/unified/findOneAndDelete.json b/test/transactions/unified/findOneAndDelete.json new file mode 100644 index 0000000000..7db9c872af --- /dev/null +++ b/test/transactions/unified/findOneAndDelete.json @@ -0,0 +1,317 @@ +{ + "description": "findOneAndDelete", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndDelete", + "operations": [ + { + "object": 
"session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + } + }, + "expectResult": { + "_id": 3 + } + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "session": "session0", + "filter": { + "_id": 4 + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "remove": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 4 + }, + "remove": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "collection writeConcern 
ignored for findOneAndDelete", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection_wc_majority", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection_wc_majority", + "name": "findOneAndDelete", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + } + }, + "expectResult": { + "_id": 3 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "remove": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/findOneAndReplace.json b/test/transactions/unified/findOneAndReplace.json new file mode 100644 index 0000000000..d9248244b3 --- /dev/null +++ b/test/transactions/unified/findOneAndReplace.json @@ -0,0 +1,352 @@ +{ + "description": "findOneAndReplace", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + 
"minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndReplace", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + }, + "replacement": { + "x": 1 + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 3 + } + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "session": "session0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 1 + }, + "upsert": true, + "returnDocument": "After" + }, + "expectResult": { + "_id": 4, + "x": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "update": { + "x": 1 + }, + "new": false, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": 
"transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 4 + }, + "update": { + "x": 1 + }, + "new": true, + "upsert": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3, + "x": 1 + }, + { + "_id": 4, + "x": 1 + } + ] + } + ] + }, + { + "description": "collection writeConcern ignored for findOneAndReplace", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection_wc_majority", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection_wc_majority", + "name": "findOneAndReplace", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + }, + "replacement": { + "x": 1 + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 3 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + 
"expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "update": { + "x": 1 + }, + "new": false, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/findOneAndUpdate.json b/test/transactions/unified/findOneAndUpdate.json new file mode 100644 index 0000000000..34a40bb570 --- /dev/null +++ b/test/transactions/unified/findOneAndUpdate.json @@ -0,0 +1,538 @@ +{ + "description": "findOneAndUpdate", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ 
+ { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndUpdate", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 3 + } + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "session": "session0", + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true, + "returnDocument": "After" + }, + "expectResult": { + "_id": 4, + "x": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 3, + "x": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 3, + "x": 2 + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": false, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": 
{ + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": true, + "upsert": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": false, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": 
"test", + "query": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": false, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3, + "x": 2 + }, + { + "_id": 4, + "x": 1 + } + ] + } + ] + }, + { + "description": "collection writeConcern ignored for findOneAndUpdate", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection_wc_majority", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection_wc_majority", + "name": "findOneAndUpdate", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 3 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + 
"commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": false, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/insert.json b/test/transactions/unified/insert.json new file mode 100644 index 0000000000..9a80d8bf4b --- /dev/null +++ b/test/transactions/unified/insert.json @@ -0,0 +1,895 @@ +{ + "description": "insert", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + 
"tests": [ + { + "description": "insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2 + }, + { + "_id": 3 + } + ], + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 4 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 4 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 5 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 5 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + }, + { + "_id": 3 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + 
"txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 5 + } + ], + "ordered": true, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + ] + 
}, + { + "description": "insert with session1", + "operations": [ + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2 + }, + { + "_id": 3 + } + ], + "session": "session1" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 4 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 4 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + }, + { + "_id": 3 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } 
+ }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + }, + { + "description": "collection writeConcern without transaction", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection_wc_majority", + "database": "database1", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": 
"majority" + } + } + } + }, + { + "session": { + "id": "session2", + "client": "client1" + } + } + ] + } + }, + { + "object": "collection_wc_majority", + "name": "insertOne", + "arguments": { + "session": "session2", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session2" + }, + "txnNumber": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "collection writeConcern ignored for insert", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection_wc_majority", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection_wc_majority", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection_wc_majority", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2 + }, + { + "_id": 3 + } + ], + "session": "session0" + }, + "expectResult": { + 
"$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + }, + { + "_id": 3 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/legacy/isolation.json b/test/transactions/unified/isolation.json similarity index 52% rename from test/transactions/legacy/isolation.json rename to test/transactions/unified/isolation.json index f16b28a5e6..5d0a0139fb 100644 --- a/test/transactions/legacy/isolation.json +++ b/test/transactions/unified/isolation.json 
@@ -1,225 +1,281 @@ { - "runOn": [ + "description": "isolation", + "schemaVersion": "1.3", + "runOnRequirements": [ { "minServerVersion": "4.0", - "topology": [ + "topologies": [ "replicaset" ] }, { "minServerVersion": "4.1.8", - "topology": [ - "sharded" + "topologies": [ + "sharded", + "load-balanced" ] } ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], "tests": [ { "description": "one transaction", "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 1 } }, - "result": { - "insertedId": 1 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } } }, { + "object": "collection0", "name": "find", - "object": "collection", "arguments": { "session": "session0", "filter": { "_id": 1 } }, - "result": [ + "expectResult": [ { "_id": 1 } ] }, { + "object": "collection0", "name": "find", - "object": "collection", "arguments": { "session": "session1", "filter": { "_id": 1 } }, - "result": [] + "expectResult": [] }, { + "object": "collection0", "name": "find", - "object": "collection", "arguments": { "filter": { "_id": 1 } }, - "result": [] + "expectResult": [] }, { - "name": 
"commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" }, { + "object": "collection0", "name": "find", - "object": "collection", "arguments": { "session": "session1", "filter": { "_id": 1 } }, - "result": [ + "expectResult": [ { "_id": 1 } ] }, { + "object": "collection0", "name": "find", - "object": "collection", "arguments": { "filter": { "_id": 1 } }, - "result": [ + "expectResult": [ { "_id": 1 } ] } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 } ] } - } + ] }, { "description": "two transactions", "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { - "name": "startTransaction", - "object": "session1" + "object": "session1", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 1 } }, - "result": { - "insertedId": 1 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } } }, { + "object": "collection0", "name": "find", - "object": "collection", "arguments": { "session": "session0", "filter": { "_id": 1 } }, - "result": [ + "expectResult": [ { "_id": 1 } ] }, { + "object": "collection0", "name": "find", - "object": "collection", "arguments": { "session": "session1", "filter": { "_id": 1 } }, - "result": [] + "expectResult": [] }, { + "object": "collection0", "name": "find", - "object": "collection", "arguments": { "filter": { "_id": 1 } }, - "result": [] + "expectResult": [] }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" }, { + "object": "collection0", "name": "find", - "object": "collection", "arguments": { "session": "session1", "filter": { "_id": 1 } }, - "result": [] + "expectResult": [] }, { + "object": 
"collection0", "name": "find", - "object": "collection", "arguments": { "filter": { "_id": 1 } }, - "result": [ + "expectResult": [ { "_id": 1 } ] }, { - "name": "commitTransaction", - "object": "session1" + "object": "session1", + "name": "commitTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 } ] } - } + ] } ] } diff --git a/test/transactions/legacy/mongos-pin-auto.json b/test/transactions/unified/mongos-pin-auto.json similarity index 69% rename from test/transactions/legacy/mongos-pin-auto.json rename to test/transactions/unified/mongos-pin-auto.json index 037f212f49..93eac8bb77 100644 --- a/test/transactions/legacy/mongos-pin-auto.json +++ b/test/transactions/unified/mongos-pin-auto.json @@ -1,48 +1,88 @@ { - "runOn": [ + "description": "mongos-pin-auto", + "schemaVersion": "1.4", + "runOnRequirements": [ { "minServerVersion": "4.1.8", - "topology": [ + "topologies": [ "sharded" ], "serverless": "forbid" } ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ + "createEntities": [ { - "_id": 1 + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } }, { - "_id": 2 + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] } ], "tests": [ { "description": "remain pinned after non-transient Interrupted error on insertOne", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" 
}, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { - "name": "targetedFailPoint", "object": "testRunner", + "name": "targetedFailPoint", "arguments": { "session": "session0", "failPoint": { @@ -60,15 +100,15 @@ } }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 4 } }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError", "UnknownTransactionCommitResult" @@ -77,85 +117,114 @@ } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" } ], - "expectations": [ + "expectEvents": [ { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 3 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 4 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": 
true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null, - "recoveryToken": 42 + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } }, - "command_name": "commitTransaction", - "database_name": "admin" - } + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -167,32 +236,35 @@ } ] } - } + ] }, { "description": "unpin after transient error within a transaction", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + 
"expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { - "name": "targetedFailPoint", "object": "testRunner", + "name": "targetedFailPoint", "arguments": { "session": "session0", "failPoint": { @@ -210,15 +282,15 @@ } }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 4 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ], @@ -228,85 +300,114 @@ } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 3 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, + "expectEvents": [ { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 4 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } }, - "command_name": "insert", - "database_name": 
"transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null, - "recoveryToken": 42 + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } }, - "command_name": "abortTransaction", - "database_name": "admin" - } + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -315,27 +416,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on insertOne insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -359,34 +463,36 @@ }, { "name": "insertOne", - "object": "collection", + 
"object": "collection0", "arguments": { "session": "session0", "document": { "_id": 4 } }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -395,27 +501,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on insertMany insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -439,7 +548,7 @@ }, { "name": "insertMany", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "documents": [ @@ -451,27 +560,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -480,27 +591,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on updateOne update", - "useMultipleMongoses": true, 
"operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -524,7 +638,7 @@ }, { "name": "updateOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -536,27 +650,29 @@ } } }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -565,27 +681,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on replaceOne update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -609,7 +728,7 @@ }, { "name": "replaceOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -619,27 +738,29 @@ "y": 1 } }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": 
"assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -648,27 +769,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on updateMany update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -692,7 +816,7 @@ }, { "name": "updateMany", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -706,27 +830,29 @@ } } }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -735,27 +861,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on deleteOne delete", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { 
"_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -779,34 +908,36 @@ }, { "name": "deleteOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { "_id": 1 } }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -815,27 +946,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on deleteMany delete", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -859,7 +993,7 @@ }, { "name": "deleteMany", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -868,27 +1002,29 @@ } } }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + 
"databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -897,27 +1033,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on findOneAndDelete findAndModify", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -941,34 +1080,36 @@ }, { "name": "findOneAndDelete", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { "_id": 1 } }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -977,27 +1118,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on findOneAndUpdate findAndModify", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1021,7 +1165,7 @@ }, { "name": "findOneAndUpdate", - "object": "collection", + "object": 
"collection0", "arguments": { "session": "session0", "filter": { @@ -1034,27 +1178,29 @@ }, "returnDocument": "Before" }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1063,27 +1209,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on findOneAndReplace findAndModify", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1107,7 +1256,7 @@ }, { "name": "findOneAndReplace", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -1118,27 +1267,29 @@ }, "returnDocument": "Before" }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1147,27 +1298,30 @@ } ] } - } + ] }, { "description": "remain pinned after 
non-transient Interrupted error on bulkWrite insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1191,13 +1345,12 @@ }, { "name": "bulkWrite", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "requests": [ { - "name": "insertOne", - "arguments": { + "insertOne": { "document": { "_id": 1 } @@ -1205,27 +1358,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1234,27 +1389,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on bulkWrite update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1278,13 +1436,12 @@ }, { "name": "bulkWrite", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "requests": [ { - "name": 
"updateOne", - "arguments": { + "updateOne": { "filter": { "_id": 1 }, @@ -1297,27 +1454,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1326,27 +1485,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on bulkWrite delete", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1370,13 +1532,12 @@ }, { "name": "bulkWrite", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "requests": [ { - "name": "deleteOne", - "arguments": { + "deleteOne": { "filter": { "_id": 1 } @@ -1384,27 +1545,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1413,27 +1576,30 @@ } ] } - } + ] }, { "description": "remain 
pinned after non-transient Interrupted error on find find", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1457,34 +1623,36 @@ }, { "name": "find", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { "_id": 1 } }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1493,27 +1661,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on countDocuments aggregate", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1537,32 +1708,34 @@ }, { "name": "countDocuments", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": {} }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": 
"assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1571,27 +1744,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on aggregate aggregate", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1615,32 +1791,34 @@ }, { "name": "aggregate", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "pipeline": [] }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1649,27 +1827,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on distinct distinct", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", 
"arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1693,32 +1874,35 @@ }, { "name": "distinct", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", - "fieldName": "_id" + "fieldName": "_id", + "filter": {} }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1727,27 +1911,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on runCommand insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1771,10 +1958,10 @@ }, { "name": "runCommand", - "object": "database", - "command_name": "insert", + "object": "database0", "arguments": { "session": "session0", + "commandName": "insert", "command": { "insert": "test", "documents": [ @@ -1784,27 +1971,29 @@ ] } }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - 
"object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1813,27 +2002,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on insertOne insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1857,34 +2049,36 @@ }, { "name": "insertOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "document": { "_id": 4 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1893,27 +2087,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on insertOne insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + 
"$$unsetOrMatches": 3 + } + } } }, { @@ -1937,34 +2134,36 @@ }, { "name": "insertOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "document": { "_id": 4 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1973,27 +2172,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on insertMany insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2017,7 +2219,7 @@ }, { "name": "insertMany", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "documents": [ @@ -2029,27 +2231,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2058,27 +2262,30 @@ } ] } - } + ] }, { 
"description": "unpin after transient ShutdownInProgress error on insertMany insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2102,7 +2309,7 @@ }, { "name": "insertMany", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "documents": [ @@ -2114,27 +2321,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2143,27 +2352,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on updateOne update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2187,7 +2399,7 @@ }, { "name": "updateOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -2199,27 +2411,29 @@ } } }, - "result": { + "expectError": { "errorLabelsContain": 
[ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2228,27 +2442,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on updateOne update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2272,7 +2489,7 @@ }, { "name": "updateOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -2284,27 +2501,29 @@ } } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2313,27 +2532,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on replaceOne update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": 
"collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2357,7 +2579,7 @@ }, { "name": "replaceOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -2367,27 +2589,29 @@ "y": 1 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2396,27 +2620,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on replaceOne update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2440,7 +2667,7 @@ }, { "name": "replaceOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -2450,27 +2677,29 @@ "y": 1 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + 
"object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2479,27 +2708,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on updateMany update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2523,7 +2755,7 @@ }, { "name": "updateMany", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -2537,27 +2769,29 @@ } } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2566,27 +2800,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on updateMany update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + 
"$$unsetOrMatches": 3 + } + } } }, { @@ -2610,7 +2847,7 @@ }, { "name": "updateMany", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -2624,27 +2861,29 @@ } } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2653,27 +2892,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on deleteOne delete", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2697,34 +2939,36 @@ }, { "name": "deleteOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { "_id": 1 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2733,27 +2977,30 @@ } ] } - } + ] }, { 
"description": "unpin after transient ShutdownInProgress error on deleteOne delete", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2777,34 +3024,36 @@ }, { "name": "deleteOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { "_id": 1 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2813,27 +3062,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on deleteMany delete", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2857,7 +3109,7 @@ }, { "name": "deleteMany", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -2866,27 +3118,29 @@ } } }, - "result": { + "expectError": { "errorLabelsContain": [ 
"TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2895,27 +3149,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on deleteMany delete", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2939,7 +3196,7 @@ }, { "name": "deleteMany", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -2948,27 +3205,29 @@ } } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2977,27 +3236,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on findOneAndDelete findAndModify", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + 
"object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3021,34 +3283,36 @@ }, { "name": "findOneAndDelete", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { "_id": 1 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3057,27 +3321,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on findOneAndDelete findAndModify", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3101,34 +3368,36 @@ }, { "name": "findOneAndDelete", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { "_id": 1 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": 
"session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3137,27 +3406,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on findOneAndUpdate findAndModify", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3181,7 +3453,7 @@ }, { "name": "findOneAndUpdate", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -3194,27 +3466,29 @@ }, "returnDocument": "Before" }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3223,27 +3497,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on findOneAndUpdate findAndModify", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + 
"$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3267,7 +3544,7 @@ }, { "name": "findOneAndUpdate", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -3280,27 +3557,29 @@ }, "returnDocument": "Before" }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3309,27 +3588,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on findOneAndReplace findAndModify", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3353,7 +3635,7 @@ }, { "name": "findOneAndReplace", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -3364,27 +3646,29 @@ }, "returnDocument": "Before" }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + 
"collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3393,27 +3677,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on findOneAndReplace findAndModify", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3437,7 +3724,7 @@ }, { "name": "findOneAndReplace", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -3448,27 +3735,29 @@ }, "returnDocument": "Before" }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3477,27 +3766,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on bulkWrite insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3521,13 +3813,12 @@ }, { "name": "bulkWrite", - 
"object": "collection", + "object": "collection0", "arguments": { "session": "session0", "requests": [ { - "name": "insertOne", - "arguments": { + "insertOne": { "document": { "_id": 1 } @@ -3535,27 +3826,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3564,27 +3857,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on bulkWrite insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3608,13 +3904,12 @@ }, { "name": "bulkWrite", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "requests": [ { - "name": "insertOne", - "arguments": { + "insertOne": { "document": { "_id": 1 } @@ -3622,27 +3917,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + 
"databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3651,27 +3948,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on bulkWrite update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3695,13 +3995,12 @@ }, { "name": "bulkWrite", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "requests": [ { - "name": "updateOne", - "arguments": { + "updateOne": { "filter": { "_id": 1 }, @@ -3714,27 +4013,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3743,27 +4044,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on bulkWrite update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3787,13 +4091,12 @@ }, { "name": 
"bulkWrite", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "requests": [ { - "name": "updateOne", - "arguments": { + "updateOne": { "filter": { "_id": 1 }, @@ -3806,27 +4109,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3835,27 +4140,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on bulkWrite delete", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3879,13 +4187,12 @@ }, { "name": "bulkWrite", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "requests": [ { - "name": "deleteOne", - "arguments": { + "deleteOne": { "filter": { "_id": 1 } @@ -3893,27 +4200,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + 
"databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3922,27 +4231,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on bulkWrite delete", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3966,13 +4278,12 @@ }, { "name": "bulkWrite", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "requests": [ { - "name": "deleteOne", - "arguments": { + "deleteOne": { "filter": { "_id": 1 } @@ -3980,27 +4291,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -4009,27 +4322,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on find find", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -4053,34 +4369,36 @@ }, { "name": "find", - 
"object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { "_id": 1 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -4089,27 +4407,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on find find", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -4133,34 +4454,36 @@ }, { "name": "find", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { "_id": 1 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -4169,27 +4492,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on countDocuments aggregate", - "useMultipleMongoses": true, 
"operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -4213,32 +4539,34 @@ }, { "name": "countDocuments", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": {} }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -4247,27 +4575,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on countDocuments aggregate", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -4291,32 +4622,34 @@ }, { "name": "countDocuments", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": {} }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", 
"arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -4325,27 +4658,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on aggregate aggregate", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -4369,32 +4705,34 @@ }, { "name": "aggregate", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "pipeline": [] }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -4403,27 +4741,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on aggregate aggregate", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 
+ "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -4447,32 +4788,34 @@ }, { "name": "aggregate", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "pipeline": [] }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -4481,27 +4824,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on distinct distinct", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -4525,32 +4871,35 @@ }, { "name": "distinct", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", - "fieldName": "_id" + "fieldName": "_id", + "filter": {} }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + 
"documents": [ { "_id": 1 }, @@ -4559,27 +4908,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on distinct distinct", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -4603,32 +4955,35 @@ }, { "name": "distinct", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", - "fieldName": "_id" + "fieldName": "_id", + "filter": {} }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -4637,27 +4992,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on runCommand insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -4681,10 +5039,10 @@ }, { "name": "runCommand", - "object": "database", - "command_name": "insert", + "object": "database0", "arguments": { 
"session": "session0", + "commandName": "insert", "command": { "insert": "test", "documents": [ @@ -4694,27 +5052,29 @@ ] } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -4723,27 +5083,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on runCommand insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -4767,10 +5130,10 @@ }, { "name": "runCommand", - "object": "database", - "command_name": "insert", + "object": "database0", "arguments": { "session": "session0", + "commandName": "insert", "command": { "insert": "test", "documents": [ @@ -4780,27 +5143,29 @@ ] } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -4809,7 +5174,7 @@ } ] } - 
} + ] } ] } diff --git a/test/transactions/unified/mongos-recovery-token-errorLabels.json b/test/transactions/unified/mongos-recovery-token-errorLabels.json new file mode 100644 index 0000000000..13345c6a29 --- /dev/null +++ b/test/transactions/unified/mongos-recovery-token-errorLabels.json @@ -0,0 +1,211 @@ +{ + "description": "mongos-recovery-token-errorLabels", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "serverless": "forbid", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction retry succeeds on new mongos", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "testRunner", + "name": "targetedFailPoint", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": 
"commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/mongos-recovery-token.json b/test/transactions/unified/mongos-recovery-token.json new file mode 100644 index 0000000000..00909c4218 --- /dev/null +++ b/test/transactions/unified/mongos-recovery-token.json @@ -0,0 +1,566 @@ +{ + "description": "mongos-recovery-token", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.1.8", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + 
"createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction explicit retries include recoveryToken", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": 
"commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction retry fails on new mongos", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": true, + "uriOptions": { + "heartbeatFrequencyMS": 30000 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + 
"$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "testRunner", + "name": "targetedFailPoint", + "arguments": { + "session": "session1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 7 + }, + "data": { + "failCommands": [ + "commitTransaction", + "isMaster", + "hello" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ], + "errorCodeName": "NoSuchTransaction" + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + 
"collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction sends recoveryToken", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "testRunner", + "name": "targetedFailPoint", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false 
+ }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/transactions/unified/mongos-unpin.json b/test/transactions/unified/mongos-unpin.json index 4f7ae43794..4d1ebc87bc 100644 --- a/test/transactions/unified/mongos-unpin.json +++ b/test/transactions/unified/mongos-unpin.json @@ -5,7 +5,7 @@ { "minServerVersion": "4.2", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], @@ -52,7 +52,10 @@ "description": "unpin after TransientTransactionError error on commit", "runOnRequirements": [ { - "serverless": "forbid" + "serverless": "forbid", + "topologies": [ + "sharded" + ] } ], "operations": [ @@ -163,7 +166,10 @@ "description": "unpin after non-transient error on abort", "runOnRequirements": [ { - "serverless": "forbid" + "serverless": "forbid", + "topologies": [ + "sharded" + ] } ], "operations": [ @@ -233,6 +239,13 @@ }, { "description": "unpin after TransientTransactionError error on abort", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], "operations": [ { "name": "startTransaction", diff --git a/test/transactions/legacy/pin-mongos.json b/test/transactions/unified/pin-mongos.json similarity index 51% rename from test/transactions/legacy/pin-mongos.json rename to test/transactions/unified/pin-mongos.json index 485a3d9322..5f2ecca5c1 100644 --- a/test/transactions/legacy/pin-mongos.json +++ b/test/transactions/unified/pin-mongos.json @@ -1,128 +1,167 @@ { - "runOn": [ + "description": "pin-mongos", + "schemaVersion": "1.9", + "runOnRequirements": [ { "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ], - "serverless": "forbid" + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] } ], - "database_name": 
"transaction-tests", - "collection_name": "test", - "data": [ + "createEntities": [ { - "_id": 1 + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } }, { - "_id": 2 + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] } ], "tests": [ { "description": "countDocuments", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "countDocuments", - "object": "collection", "arguments": { "filter": { "_id": 2 }, "session": "session0" }, - "result": 1 + "expectResult": 1 }, { + "object": "collection0", "name": "countDocuments", - "object": "collection", "arguments": { "filter": { "_id": 2 }, "session": "session0" }, - "result": 1 + "expectResult": 1 }, { + "object": "collection0", "name": "countDocuments", - "object": "collection", "arguments": { "filter": { "_id": 2 }, "session": "session0" }, - "result": 1 + "expectResult": 1 }, { + "object": "collection0", "name": "countDocuments", - "object": "collection", "arguments": { "filter": { "_id": 2 }, "session": "session0" }, - "result": 1 + "expectResult": 1 }, { + "object": "collection0", "name": "countDocuments", - "object": "collection", "arguments": { "filter": { "_id": 2 }, "session": "session0" }, - "result": 1 + "expectResult": 1 }, { + "object": "collection0", "name": "countDocuments", - "object": "collection", "arguments": { "filter": { "_id": 2 }, "session": "session0" }, - "result": 1 + "expectResult": 1 }, { + "object": "collection0", 
"name": "countDocuments", - "object": "collection", "arguments": { "filter": { "_id": 2 }, "session": "session0" }, - "result": 1 + "expectResult": 1 }, { + "object": "collection0", "name": "countDocuments", - "object": "collection", "arguments": { "filter": { "_id": 2 }, "session": "session0" }, - "result": 1 + "expectResult": 1 }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -131,120 +170,129 @@ } ] } - } + ] }, { "description": "distinct", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "distinct", - "object": "collection", "arguments": { "fieldName": "_id", + "filter": {}, "session": "session0" }, - "result": [ + "expectResult": [ 1, 2 ] }, { + "object": "collection0", "name": "distinct", - "object": "collection", "arguments": { "fieldName": "_id", + "filter": {}, "session": "session0" }, - "result": [ + "expectResult": [ 1, 2 ] }, { + "object": "collection0", "name": "distinct", - "object": "collection", "arguments": { "fieldName": "_id", + "filter": {}, "session": "session0" }, - "result": [ + "expectResult": [ 1, 2 ] }, { + "object": "collection0", "name": "distinct", - "object": "collection", "arguments": { "fieldName": "_id", + "filter": {}, "session": "session0" }, - "result": [ + "expectResult": [ 1, 2 ] }, { + "object": "collection0", "name": "distinct", - "object": "collection", "arguments": { "fieldName": "_id", + "filter": {}, "session": "session0" }, - "result": [ + "expectResult": [ 1, 2 ] }, { + "object": "collection0", "name": "distinct", - "object": "collection", "arguments": { "fieldName": "_id", + "filter": {}, "session": "session0" }, - "result": [ + "expectResult": [ 
1, 2 ] }, { + "object": "collection0", "name": "distinct", - "object": "collection", "arguments": { "fieldName": "_id", + "filter": {}, "session": "session0" }, - "result": [ + "expectResult": [ 1, 2 ] }, { + "object": "collection0", "name": "distinct", - "object": "collection", "arguments": { "fieldName": "_id", + "filter": {}, "session": "session0" }, - "result": [ + "expectResult": [ 1, 2 ] }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -253,144 +301,145 @@ } ] } - } + ] }, { "description": "find", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "find", - "object": "collection", "arguments": { "filter": { "_id": 2 }, "session": "session0" }, - "result": [ + "expectResult": [ { "_id": 2 } ] }, { + "object": "collection0", "name": "find", - "object": "collection", "arguments": { "filter": { "_id": 2 }, "session": "session0" }, - "result": [ + "expectResult": [ { "_id": 2 } ] }, { + "object": "collection0", "name": "find", - "object": "collection", "arguments": { "filter": { "_id": 2 }, "session": "session0" }, - "result": [ + "expectResult": [ { "_id": 2 } ] }, { + "object": "collection0", "name": "find", - "object": "collection", "arguments": { "filter": { "_id": 2 }, "session": "session0" }, - "result": [ + "expectResult": [ { "_id": 2 } ] }, { + "object": "collection0", "name": "find", - "object": "collection", "arguments": { "filter": { "_id": 2 }, "session": "session0" }, - "result": [ + "expectResult": [ { "_id": 2 } ] }, { + "object": "collection0", "name": "find", - "object": "collection", "arguments": { "filter": { "_id": 2 }, "session": "session0" }, - "result": [ + 
"expectResult": [ { "_id": 2 } ] }, { + "object": "collection0", "name": "find", - "object": "collection", "arguments": { "filter": { "_id": 2 }, "session": "session0" }, - "result": [ + "expectResult": [ { "_id": 2 } ] }, { + "object": "collection0", "name": "find", - "object": "collection", "arguments": { "filter": { "_id": 2 }, "session": "session0" }, - "result": [ + "expectResult": [ { "_id": 2 } ] }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -399,128 +448,161 @@ } ] } - } + ] }, { "description": "insertOne", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "document": { "_id": 3 }, "session": "session0" }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "document": { "_id": 4 }, "session": "session0" }, - "result": { - "insertedId": 4 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 4 + } + } } }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "document": { "_id": 5 }, "session": "session0" }, - "result": { - "insertedId": 5 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 5 + } + } } }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "document": { "_id": 6 }, "session": "session0" }, - "result": { - "insertedId": 6 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 6 + } 
+ } } }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "document": { "_id": 7 }, "session": "session0" }, - "result": { - "insertedId": 7 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 7 + } + } } }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "document": { "_id": 8 }, "session": "session0" }, - "result": { - "insertedId": 8 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 8 + } + } } }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "document": { "_id": 9 }, "session": "session0" }, - "result": { - "insertedId": 9 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 9 + } + } } }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "document": { "_id": 10 }, "session": "session0" }, - "result": { - "insertedId": 10 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 10 + } + } } }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -553,144 +635,165 @@ } ] } - } + ] }, { "description": "mixed read write operations", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "document": { "_id": 3 }, "session": "session0" }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { + "object": "collection0", "name": "countDocuments", - "object": "collection", "arguments": { 
"filter": { "_id": 3 }, "session": "session0" }, - "result": 1 + "expectResult": 1 }, { + "object": "collection0", "name": "countDocuments", - "object": "collection", "arguments": { "filter": { "_id": 3 }, "session": "session0" }, - "result": 1 + "expectResult": 1 }, { + "object": "collection0", "name": "countDocuments", - "object": "collection", "arguments": { "filter": { "_id": 3 }, "session": "session0" }, - "result": 1 + "expectResult": 1 }, { + "object": "collection0", "name": "countDocuments", - "object": "collection", "arguments": { "filter": { "_id": 3 }, "session": "session0" }, - "result": 1 + "expectResult": 1 }, { + "object": "collection0", "name": "countDocuments", - "object": "collection", "arguments": { "filter": { "_id": 3 }, "session": "session0" }, - "result": 1 + "expectResult": 1 }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "document": { "_id": 4 }, "session": "session0" }, - "result": { - "insertedId": 4 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 4 + } + } } }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "document": { "_id": 5 }, "session": "session0" }, - "result": { - "insertedId": 5 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 5 + } + } } }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "document": { "_id": 6 }, "session": "session0" }, - "result": { - "insertedId": 6 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 6 + } + } } }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "document": { "_id": 7 }, "session": "session0" }, - "result": { - "insertedId": 7 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 7 + } + } } }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": 
"commitTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -714,19 +817,18 @@ } ] } - } + ] }, { "description": "multiple commits", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertMany", - "object": "collection", "arguments": { "documents": [ { @@ -738,78 +840,84 @@ ], "session": "session0" }, - "result": { - "insertedIds": { - "0": 3, - "1": 4 + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 3, + "1": 4 + } + } } } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" }, { - "name": "commitTransaction", - "object": "session0" + "object": 
"session0", + "name": "commitTransaction" }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -824,19 +932,25 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient error on commit", - "useMultipleMongoses": true, + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertMany", - "object": "collection", "arguments": { "documents": [ { @@ -848,23 +962,27 @@ ], "session": "session0" }, - "result": { - "insertedIds": { - "0": 3, - "1": 4 + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 3, + "1": 4 + } + } } } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "targetedFailPoint", "object": "testRunner", + "name": "targetedFailPoint", "arguments": { "session": "session0", "failPoint": { @@ -882,9 +1000,9 @@ } }, { - "name": "commitTransaction", "object": "session0", - "result": { + "name": "commitTransaction", + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ], @@ -892,27 +1010,29 @@ } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": 
"session0" } } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -927,32 +1047,42 @@ } ] } - } + ] }, { "description": "unpin after transient error within a transaction", - "useMultipleMongoses": true, + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { - "name": "targetedFailPoint", "object": "testRunner", + "name": "targetedFailPoint", "arguments": { "session": "session0", "failPoint": { @@ -970,15 +1100,15 @@ } }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 4 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ], @@ -988,78 +1118,107 @@ } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 3 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, + "expectEvents": [ { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 4 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - 
"autocommit": false, - "writeConcern": null + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null, - "recoveryToken": 42 + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } }, - "command_name": "abortTransaction", - "database_name": "admin" - } + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1068,37 +1227,84 @@ } ] } - } + ] }, { "description": "unpin after transient 
error within a transaction and commit", - "useMultipleMongoses": true, - "clientOptions": { - "heartbeatFrequencyMS": 30000 - }, + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": true, + "uriOptions": { + "heartbeatFrequencyMS": 30000 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" }, { + "object": "collection1", "name": "insertOne", - "object": "collection", "arguments": { - "session": "session0", + "session": "session1", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { - "name": "targetedFailPoint", "object": "testRunner", + "name": "targetedFailPoint", "arguments": { - "session": "session0", + "session": "session1", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -1116,15 +1322,15 @@ } }, { + "object": "collection1", "name": "insertOne", - "object": "collection", "arguments": { - "session": "session0", + "session": "session1", "document": { "_id": 4 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ], @@ -1134,9 +1340,9 @@ } }, { + "object": "session1", "name": "commitTransaction", - "object": "session0", - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ], @@ -1147,74 +1353,103 @@ } } ], - "expectations": [ - { - "command_started_event": { - "command": { - 
"insert": "test", - "documents": [ - { - "_id": 3 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, + "expectEvents": [ { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 4 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null, - "recoveryToken": 42 + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } }, - "command_name": "commitTransaction", - "database_name": "admin" - } + { + "commandStartedEvent": { + "command": { + 
"commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1223,7 +1458,7 @@ } ] } - } + ] } ] } diff --git a/test/transactions/unified/read-concern.json b/test/transactions/unified/read-concern.json new file mode 100644 index 0000000000..b3bd967c09 --- /dev/null +++ b/test/transactions/unified/read-concern.json @@ -0,0 +1,1924 @@ +{ + "description": "read-concern", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "database": { + "id": "database_rc_majority", + "client": "client0", + "databaseName": "transaction-tests", + "databaseOptions": { + "readConcern": { + "level": "majority" + } + } + } + }, + { + "collection": { + "id": "collection_rc_majority", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "readConcern": { + "level": "majority" + } + } + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ], + "tests": [ + { 
+ "description": "only first countDocuments includes readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "majority" + } + } + }, + { + "object": "collection_rc_majority", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": { + "$gte": 2 + } + }, + "session": "session0" + }, + "expectResult": 3 + }, + { + "object": "collection_rc_majority", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": { + "$gte": 2 + } + }, + "session": "session0" + }, + "expectResult": 3 + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": { + "_id": { + "$gte": 2 + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "cursor": {}, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "level": "majority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": { + "_id": { + "$gte": 2 + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "cursor": {}, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + 
"writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "only first find includes readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "majority" + } + } + }, + { + "object": "collection_rc_majority", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection_rc_majority", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "level": "majority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": 
"test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "only first aggregate includes readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "majority" + } + } + }, + { + "object": "collection_rc_majority", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection_rc_majority", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], 
+ "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": { + "batchSize": 3 + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "level": "majority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": { + "batchSize": 3 + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": 
"transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "only first distinct includes readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "majority" + } + } + }, + { + "object": "collection_rc_majority", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2, + 3, + 4 + ] + }, + { + "object": "collection_rc_majority", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2, + 3, + 4 + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "_id", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "level": "majority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "distinct", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "_id", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + 
"startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "distinct", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "only first runCommand includes readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "majority" + } + } + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "find": "test" + }, + "commandName": "find" + } + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "find": "test" + }, + "commandName": "find" + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "level": "majority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false 
+ }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "countDocuments ignores collection readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_rc_majority", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": { + "$gte": 2 + } + }, + "session": "session0" + }, + "expectResult": 3 + }, + { + "object": "collection_rc_majority", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": { + "$gte": 2 + } + }, + "session": "session0" + }, + "expectResult": 3 + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": { + "_id": { + "$gte": 2 + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "cursor": {}, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + 
"commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": { + "_id": { + "$gte": 2 + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "cursor": {}, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "find ignores collection readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_rc_majority", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection_rc_majority", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": 
"session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 
2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "aggregate ignores collection readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_rc_majority", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection_rc_majority", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": { + "batchSize": 3 + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": { + "batchSize": 3 + }, + "lsid": { + "$$sessionLsid": "session0" + 
}, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "distinct ignores collection readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_rc_majority", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2, + 3, + 4 + ] + }, + { + "object": "collection_rc_majority", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2, + 3, + 4 + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + 
"distinct": "test", + "key": "_id", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "distinct", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "_id", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "distinct", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "runCommand ignores database readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "database_rc_majority", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "find": "test" + }, + "commandName": "find" + } + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "find": "test" + }, + "commandName": "find" + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + 
"commandStartedEvent": { + "command": { + "find": "test", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/legacy/read-pref.json b/test/transactions/unified/read-pref.json similarity index 63% rename from test/transactions/legacy/read-pref.json rename to test/transactions/unified/read-pref.json index bf1f1970eb..eda00bd10d 100644 --- a/test/transactions/legacy/read-pref.json +++ b/test/transactions/unified/read-pref.json @@ -1,32 +1,84 @@ { - "runOn": [ + "description": "read-pref", + "schemaVersion": "1.3", + "runOnRequirements": [ { "minServerVersion": "4.0", - "topology": [ + "topologies": [ "replicaset" ] }, { "minServerVersion": "4.1.8", - "topology": [ - "sharded" + "topologies": [ + 
"sharded", + "load-balanced" ] } ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "collection": { + "id": "collection_rp_primary", + "database": "database0", + "collectionName": "test" + } + }, + { + "collection": { + "id": "collection_rp_secondary", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], "tests": [ { "description": "default readPreference", "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertMany", - "object": "collection", "arguments": { "documents": [ { @@ -44,23 +96,22 @@ ], "session": "session0" }, - "result": { - "insertedIds": { - "0": 1, - "1": 2, - "2": 3, - "3": 4 + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1, + "1": 2, + "2": 3, + "3": 4 + } + } } } }, { + "object": "collection_rp_secondary", "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Secondary" - } - }, "arguments": { "session": "session0", "pipeline": [ @@ -74,25 +125,21 @@ } ] }, - "result": [ + "expectResult": [ { "count": 1 } ] }, { + "object": "collection_rp_secondary", "name": "find", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Secondary" - } - }, "arguments": { - "session": "session0", - "batchSize": 3 + "batchSize": 
3, + "filter": {}, + "session": "session0" }, - "result": [ + "expectResult": [ { "_id": 1 }, @@ -108,13 +155,8 @@ ] }, { + "object": "collection_rp_secondary", "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Secondary" - } - }, "arguments": { "pipeline": [ { @@ -126,7 +168,7 @@ "batchSize": 3, "session": "session0" }, - "result": [ + "expectResult": [ { "_id": 1 }, @@ -142,13 +184,15 @@ ] }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -163,25 +207,23 @@ } ] } - } + ] }, { "description": "primary readPreference", "operations": [ { - "name": "startTransaction", "object": "session0", + "name": "startTransaction", "arguments": { - "options": { - "readPreference": { - "mode": "Primary" - } + "readPreference": { + "mode": "primary" } } }, { + "object": "collection0", "name": "insertMany", - "object": "collection", "arguments": { "documents": [ { @@ -199,23 +241,22 @@ ], "session": "session0" }, - "result": { - "insertedIds": { - "0": 1, - "1": 2, - "2": 3, - "3": 4 + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1, + "1": 2, + "2": 3, + "3": 4 + } + } } } }, { + "object": "collection_rp_secondary", "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Secondary" - } - }, "arguments": { "session": "session0", "pipeline": [ @@ -229,25 +270,21 @@ } ] }, - "result": [ + "expectResult": [ { "count": 1 } ] }, { + "object": "collection_rp_secondary", "name": "find", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Secondary" - } - }, "arguments": { - "session": "session0", - "batchSize": 3 + "batchSize": 3, + "filter": {}, + "session": "session0" }, - 
"result": [ + "expectResult": [ { "_id": 1 }, @@ -263,13 +300,8 @@ ] }, { + "object": "collection_rp_secondary", "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Secondary" - } - }, "arguments": { "pipeline": [ { @@ -281,7 +313,7 @@ "batchSize": 3, "session": "session0" }, - "result": [ + "expectResult": [ { "_id": 1 }, @@ -297,13 +329,15 @@ ] }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -318,25 +352,23 @@ } ] } - } + ] }, { "description": "secondary readPreference", "operations": [ { - "name": "startTransaction", "object": "session0", + "name": "startTransaction", "arguments": { - "options": { - "readPreference": { - "mode": "Secondary" - } + "readPreference": { + "mode": "secondary" } } }, { + "object": "collection0", "name": "insertMany", - "object": "collection", "arguments": { "documents": [ { @@ -354,23 +386,22 @@ ], "session": "session0" }, - "result": { - "insertedIds": { - "0": 1, - "1": 2, - "2": 3, - "3": 4 + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1, + "1": 2, + "2": 3, + "3": 4 + } + } } } }, { + "object": "collection_rp_primary", "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Primary" - } - }, "arguments": { "session": "session0", "pipeline": [ @@ -384,34 +415,25 @@ } ] }, - "result": { + "expectError": { "errorContains": "read preference in a transaction must be primary" } }, { + "object": "collection_rp_primary", "name": "find", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Primary" - } - }, "arguments": { - "session": "session0", - "batchSize": 3 + "batchSize": 3, + "filter": {}, + "session": "session0" }, - 
"result": { + "expectError": { "errorContains": "read preference in a transaction must be primary" } }, { + "object": "collection_rp_primary", "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Primary" - } - }, "arguments": { "pipeline": [ { @@ -423,38 +445,38 @@ "batchSize": 3, "session": "session0" }, - "result": { + "expectError": { "errorContains": "read preference in a transaction must be primary" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [] + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] } - } + ] }, { "description": "primaryPreferred readPreference", "operations": [ { - "name": "startTransaction", "object": "session0", + "name": "startTransaction", "arguments": { - "options": { - "readPreference": { - "mode": "PrimaryPreferred" - } + "readPreference": { + "mode": "primaryPreferred" } } }, { + "object": "collection0", "name": "insertMany", - "object": "collection", "arguments": { "documents": [ { @@ -472,23 +494,22 @@ ], "session": "session0" }, - "result": { - "insertedIds": { - "0": 1, - "1": 2, - "2": 3, - "3": 4 + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1, + "1": 2, + "2": 3, + "3": 4 + } + } } } }, { + "object": "collection_rp_primary", "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Primary" - } - }, "arguments": { "session": "session0", "pipeline": [ @@ -502,34 +523,25 @@ } ] }, - "result": { + "expectError": { "errorContains": "read preference in a transaction must be primary" } }, { + "object": "collection_rp_primary", "name": "find", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Primary" - } - }, "arguments": { - "session": "session0", - "batchSize": 3 + "batchSize": 3, + 
"filter": {}, + "session": "session0" }, - "result": { + "expectError": { "errorContains": "read preference in a transaction must be primary" } }, { + "object": "collection_rp_primary", "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Primary" - } - }, "arguments": { "pipeline": [ { @@ -541,38 +553,38 @@ "batchSize": 3, "session": "session0" }, - "result": { + "expectError": { "errorContains": "read preference in a transaction must be primary" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [] + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] } - } + ] }, { "description": "nearest readPreference", "operations": [ { - "name": "startTransaction", "object": "session0", + "name": "startTransaction", "arguments": { - "options": { - "readPreference": { - "mode": "Nearest" - } + "readPreference": { + "mode": "nearest" } } }, { + "object": "collection0", "name": "insertMany", - "object": "collection", "arguments": { "documents": [ { @@ -590,23 +602,22 @@ ], "session": "session0" }, - "result": { - "insertedIds": { - "0": 1, - "1": 2, - "2": 3, - "3": 4 + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1, + "1": 2, + "2": 3, + "3": 4 + } + } } } }, { + "object": "collection_rp_primary", "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Primary" - } - }, "arguments": { "session": "session0", "pipeline": [ @@ -620,34 +631,25 @@ } ] }, - "result": { + "expectError": { "errorContains": "read preference in a transaction must be primary" } }, { + "object": "collection_rp_primary", "name": "find", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Primary" - } - }, "arguments": { - "session": "session0", - "batchSize": 3 + 
"batchSize": 3, + "filter": {}, + "session": "session0" }, - "result": { + "expectError": { "errorContains": "read preference in a transaction must be primary" } }, { + "object": "collection_rp_primary", "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Primary" - } - }, "arguments": { "pipeline": [ { @@ -659,62 +661,68 @@ "batchSize": 3, "session": "session0" }, - "result": { + "expectError": { "errorContains": "read preference in a transaction must be primary" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [] + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] } - } + ] }, { "description": "secondary write only", "operations": [ { - "name": "startTransaction", "object": "session0", + "name": "startTransaction", "arguments": { - "options": { - "readPreference": { - "mode": "Secondary" - } + "readPreference": { + "mode": "secondary" } } }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 1 } }, - "result": { - "insertedId": 1 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } } }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 } ] } - } + ] } ] } diff --git a/test/transactions/unified/reads.json b/test/transactions/unified/reads.json new file mode 100644 index 0000000000..52e8457634 --- /dev/null +++ b/test/transactions/unified/reads.json @@ -0,0 +1,706 @@ +{ + "description": "reads", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" 
+ ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ], + "tests": [ + { + "description": "collection readConcern without transaction", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + }, + { + "object": "collection1", + "name": "find", + "arguments": { + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "readConcern": { + "level": "majority" + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + } + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + 
"description": "find", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "batchSize": 3, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + 
"startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "aggregate", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": { + "batchSize": 3 + }, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + 
"command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": { + "batchSize": 3 + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "distinct", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2, + 3, + 
4 + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "_id", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "distinct", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/retryable-abort-errorLabels.json b/test/transactions/unified/retryable-abort-errorLabels.json new file mode 100644 index 0000000000..77a1b03eb0 --- /dev/null +++ b/test/transactions/unified/retryable-abort-errorLabels.json @@ -0,0 +1,2436 @@ +{ + "description": "retryable-abort-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": 
"test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "abortTransaction only retries once with RetryableWriteError from server", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 
{ + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction does not retry without RetryableWriteError label", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + 
"collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 10107, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + 
"writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 13436, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + 
"$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 13435, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + 
"commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 11602, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + 
"txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 11600, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": 
"transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + 
"startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 91, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, 
+ "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + 
"events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 6, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 
+ } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 9001, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + 
"session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 89, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + 
{ + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after WriteConcernError InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + 
"failCommands": [ + "abortTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 11600, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction 
succeeds after WriteConcernError InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 11602, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + 
"writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after WriteConcernError PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 189, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + 
"databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + 
"abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/transactions/unified/retryable-abort.json b/test/transactions/unified/retryable-abort.json new file mode 100644 index 0000000000..381cfa91f8 --- /dev/null +++ b/test/transactions/unified/retryable-abort.json @@ -0,0 +1,600 @@ +{ + "description": "retryable-abort", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": 
"transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "abortTransaction only performs a single retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + 
"lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction does not retry after Interrupted", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 11601, + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": 
"admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction does not retry after WriteConcernError Interrupted", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "writeConcernError": { + "code": 11601, + "errmsg": "operation was interrupted" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, 
+ { + "description": "abortTransaction succeeds after connection error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + 
"databaseName": "transaction-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/transactions/unified/retryable-commit-errorLabels.json b/test/transactions/unified/retryable-commit-errorLabels.json new file mode 100644 index 0000000000..d3ce8b148e --- /dev/null +++ b/test/transactions/unified/retryable-commit-errorLabels.json @@ -0,0 +1,2564 @@ +{ + "description": "retryable-commit-errorLabels", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction does not retry error without RetryableWriteError label", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 11600, + "errorLabels": 
[] + } + } + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction", + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "commitTransaction retries once with RetryableWriteError from server", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + 
"name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 10107, 
+ "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { 
+ "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 13436, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 
1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 13435, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + 
"commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 11602, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": 
"session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false 
+ } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + 
"$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 6, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + 
"$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 9001, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + 
"$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 89, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + 
"client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after WriteConcernError InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 11600, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + 
"object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after WriteConcernError InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + 
"failCommands": [ + "commitTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 11602, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] 
+ }, + { + "description": "commitTransaction succeeds after WriteConcernError PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 189, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + 
"autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": 
"majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after InterruptedAtShutdown", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 11600, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + 
"commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after ShutdownInProgress", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 91, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + 
"$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/retryable-commit.json b/test/transactions/unified/retryable-commit.json new file mode 100644 index 0000000000..b794c1c55c --- /dev/null +++ b/test/transactions/unified/retryable-commit.json @@ -0,0 +1,868 @@ +{ + "description": "retryable-commit", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": 
"test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction fails after Interrupted", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 11601, + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorCodeName": "Interrupted", + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": 
{ + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "commitTransaction is not retried after UnsatisfiableWriteConcern error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 100, + "errmsg": "Not enough data-bearing nodes" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + 
"writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction fails after two errors", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": 
true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction applies majority write concern on retries", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 2, + "journal": true, + "wtimeoutMS": 5000 
+ } + } + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": 2, + "j": true, + "wtimeout": 5000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "j": true, + "wtimeout": 5000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + 
"$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "j": true, + "wtimeout": 5000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after connection error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + 
}, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/retryable-writes.json b/test/transactions/unified/retryable-writes.json new file mode 100644 index 0000000000..c196e68622 --- /dev/null +++ b/test/transactions/unified/retryable-writes.json @@ -0,0 +1,468 @@ +{ + "description": "retryable-writes", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "increment txnNumber", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": 
"session0", + "name": "commitTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 4 + }, + { + "_id": 5 + } + ], + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 4, + "1": 5 + } + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + 
}, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + }, + { + "_id": 5 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "4" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + ] + }, + { + "description": "writes are not retried", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + 
"mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/transactions/unified/run-command.json b/test/transactions/unified/run-command.json new file mode 100644 index 0000000000..7bd420ef74 --- /dev/null +++ b/test/transactions/unified/run-command.json @@ -0,0 +1,421 @@ +{ + "description": "run-command", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", 
+ "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "run command with default read preference", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ] + }, + "commandName": "insert" + }, + "expectResult": { + "n": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "run command with secondary read preference in client option and primary read preference in transaction options", + "operations": [ + { + "object": "testRunner", 
+ "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "readPreference": "secondary" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "readPreference": { + "mode": "primary" + } + } + }, + { + "object": "database1", + "name": "runCommand", + "arguments": { + "session": "session1", + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ] + }, + "commandName": "insert" + }, + "expectResult": { + "n": 1 + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "run command with explicit primary read preference", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + 
"insert": "test", + "documents": [ + { + "_id": 1 + } + ] + }, + "readPreference": { + "mode": "primary" + }, + "commandName": "insert" + }, + "expectResult": { + "n": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "run command fails with explicit secondary read preference", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "find": "test" + }, + "readPreference": { + "mode": "secondary" + }, + "commandName": "find" + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + } + ] + }, + { + "description": "run command fails with secondary read preference from transaction options", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readPreference": { + "mode": "secondary" + } + } + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "find": "test" + }, + "commandName": "find" + }, + "expectError": { 
+ "errorContains": "read preference in a transaction must be primary" + } + } + ] + } + ] +} diff --git a/test/transactions/unified/transaction-options-repl.json b/test/transactions/unified/transaction-options-repl.json new file mode 100644 index 0000000000..dc2cb77582 --- /dev/null +++ b/test/transactions/unified/transaction-options-repl.json @@ -0,0 +1,267 @@ +{ + "description": "transaction-options-repl", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "readConcern snapshot in startTransaction options", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "session": { + "id": "session1", + "client": "client0", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "majority" + } + } + } + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "snapshot" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + 
"readConcern": { + "level": "snapshot" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "snapshot" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "snapshot", + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": 
false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/transaction-options.json b/test/transactions/unified/transaction-options.json new file mode 100644 index 0000000000..78e4c8207b --- /dev/null +++ b/test/transactions/unified/transaction-options.json @@ -0,0 +1,2081 @@ +{ + "description": "transaction-options", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "no transaction options set", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + 
} + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + 
"$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "transaction options inherited from client", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": 
"local" + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": 1 + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "local", + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": 1 + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "transaction options inherited from defaultTransactionOptions", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "session": { + "id": "session1", + "client": "client0", + "sessionOptions": { + 
"defaultTransactionOptions": { + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": 1 + }, + "maxCommitTimeMS": 60000 + } + } + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": 1 + }, + "maxTimeMS": 60000 + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + 
"$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority", + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": 1 + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "startTransaction options override defaults", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "snapshot" + }, + "writeConcern": { + "w": 1 + }, + "maxCommitTimeMS": 30000 + } + } + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": "majority" + }, + "maxCommitTimeMS": 60000 + } + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, 
+ "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + }, + "maxTimeMS": 60000 + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority", + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false 
+ }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "defaultTransactionOptions override client options", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": "majority" + }, + "maxCommitTimeMS": 60000 + } + } + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + 
"object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + }, + "maxTimeMS": 60000 + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority", + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + 
"autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "readConcern local in defaultTransactionOptions", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "local" + } + } + } + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + 
"_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": 1 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "local", + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": 1 + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "client writeConcern ignored for bulk", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + 
"uriOptions": { + "w": "majority" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 1 + } + } + }, + { + "object": "collection1", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ], + "session": "session1" + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": 1 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": 
"readPreference inherited from client", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "readPreference": "secondary" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection1", + "name": "find", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + 
"writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "readPreference inherited from defaultTransactionOptions", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "readPreference": "primary" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1", + "sessionOptions": { + "defaultTransactionOptions": { + "readPreference": { + "mode": "secondary" + } + } + } + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection1", + "name": "find", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + 
"$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "startTransaction overrides readPreference", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "readPreference": "primary" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1", + "sessionOptions": { + "defaultTransactionOptions": { + "readPreference": { + "mode": "primary" + } + } + } + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "readPreference": { + "mode": "secondary" + } + } + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection1", + "name": "find", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorContains": "read 
preference in a transaction must be primary" + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/update.json b/test/transactions/unified/update.json new file mode 100644 index 0000000000..8090fc9087 --- /dev/null +++ b/test/transactions/unified/update.json @@ -0,0 +1,565 @@ +{ + "description": "update", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": 
"test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 1, + "upsertedId": 4 + } + }, + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "session": "session0", + "filter": { + "x": 1 + }, + "replacement": { + "y": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$gte": 3 + } + }, + "update": { + "$set": { + "z": 1 + } + } + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": true, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + 
"q": { + "x": 1 + }, + "u": { + "y": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gte": 3 + } + }, + "u": { + "$set": { + "z": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3, + "z": 1 + }, + { + "_id": 4, + "y": 1, + "z": 1 + } + ] + } + ] + }, + { + "description": "collections writeConcern ignored for update", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + }, + { + "object": "session0", + "name": 
"startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection1", + "name": "updateOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 1, + "upsertedId": 4 + } + }, + { + "object": "collection1", + "name": "replaceOne", + "arguments": { + "session": "session0", + "filter": { + "x": 1 + }, + "replacement": { + "y": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "object": "collection1", + "name": "updateMany", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$gte": 3 + } + }, + "update": { + "$set": { + "z": 1 + } + } + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": true, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "x": 1 + }, + "u": { + "y": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + 
"autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gte": 3 + } + }, + "u": { + "$set": { + "z": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/write-concern.json b/test/transactions/unified/write-concern.json new file mode 100644 index 0000000000..7acdd54066 --- /dev/null +++ b/test/transactions/unified/write-concern.json @@ -0,0 +1,1584 @@ +{ + "description": "write-concern", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "collection": { + "id": "collection_w0", + "database": 
"database0", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + } + ] + } + ], + "tests": [ + { + "description": "commit with majority", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commit with default", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": 
"collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "abort with majority", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + 
"$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + } + ] + } + ] + }, + { + "description": "abort with default", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + 
"commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + } + ] + } + ] + }, + { + "description": "start with unacknowledged write concern", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 0 + } + }, + "expectError": { + "isClientError": true, + "errorContains": "transactions do not support unacknowledged write concern" + } + } + ] + }, + { + "description": "start with implicit unacknowledged write concern", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "w": 0 + } + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction", + "expectError": { + "isClientError": true, + "errorContains": "transactions do not support unacknowledged write concern" + } + } + ] + }, + { + "description": "unacknowledged write concern coll insertOne", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + 
"commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "unacknowledged write concern coll insertMany", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "insertMany", + "arguments": { + "session": "session0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1, + "1": 2 + } + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + 
"outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unacknowledged write concern coll bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "bulkWrite", + "arguments": { + "session": "session0", + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "unacknowledged write concern coll deleteOne", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + 
"name": "deleteOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 0 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 0 + }, + "limit": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "unacknowledged write concern coll deleteMany", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "deleteMany", + "arguments": { + "session": "session0", + "filter": { + "_id": 0 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 0 + }, + "limit": 0 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + 
"$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "unacknowledged write concern coll updateOne", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "updateOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 0 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 0 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": true, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + 
"$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0, + "x": 1 + } + ] + } + ] + }, + { + "description": "unacknowledged write concern coll updateMany", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "updateMany", + "arguments": { + "session": "session0", + "filter": { + "_id": 0 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 0 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": true + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0, + "x": 1 + } + ] + } + ] + }, + { + "description": "unacknowledged write concern coll findOneAndDelete", + "operations": [ + { + "object": "session0", + "name": 
"startTransaction" + }, + { + "object": "collection_w0", + "name": "findOneAndDelete", + "arguments": { + "session": "session0", + "filter": { + "_id": 0 + } + }, + "expectResult": { + "_id": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 0 + }, + "remove": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "unacknowledged write concern coll findOneAndReplace", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "findOneAndReplace", + "arguments": { + "session": "session0", + "filter": { + "_id": 0 + }, + "replacement": { + "x": 1 + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 0 + }, + "update": { + "x": 1 + }, + "new": false, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + 
"$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0, + "x": 1 + } + ] + } + ] + }, + { + "description": "unacknowledged write concern coll findOneAndUpdate", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "findOneAndUpdate", + "arguments": { + "session": "session0", + "filter": { + "_id": 0 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 0 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": false, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + 
"autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0, + "x": 1 + } + ] + } + ] + } + ] +} diff --git a/test/unicode/test_utf8.py b/test/unicode/test_utf8.py index fd7fb2154d..578d98bffb 100644 --- a/test/unicode/test_utf8.py +++ b/test/unicode/test_utf8.py @@ -11,7 +11,6 @@ class TestUTF8(unittest.TestCase): - # Verify that python and bson have the same understanding of # legal utf-8 if the first byte is 0xf4 (244) def _assert_same_utf8_validation(self, data): diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-invalidName.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-invalidName.json new file mode 100644 index 0000000000..9c659c8f76 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-invalidName.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-invalidName", + "schemaVersion": "1.18", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws:name_with_invalid_character*": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeLogMessages-minProperties.json b/test/unified-test-format/invalid/entity-client-observeLogMessages-minProperties.json new file mode 100644 index 0000000000..87cbd21125 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeLogMessages-minProperties.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-observeLogMessages-minProperties", + "schemaVersion": "1.13", + "createEntities": [ + { + 
"client": { + "id": "client0", + "observeLogMessages": {} + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeLogMessages-property-type.json b/test/unified-test-format/invalid/entity-client-observeLogMessages-property-type.json new file mode 100644 index 0000000000..fed0accd6e --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeLogMessages-property-type.json @@ -0,0 +1,20 @@ +{ + "description": "entity-client-observeLogMessages-property-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0", + "observeLogMessages": { + "command": {} + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeLogMessages-property-value.json b/test/unified-test-format/invalid/entity-client-observeLogMessages-property-value.json new file mode 100644 index 0000000000..f14b18d6de --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeLogMessages-property-value.json @@ -0,0 +1,20 @@ +{ + "description": "entity-client-observeLogMessages-property-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0", + "observeLogMessages": { + "command": "notALogLevel" + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeLogMessages-type.json b/test/unified-test-format/invalid/entity-client-observeLogMessages-type.json new file mode 100644 index 0000000000..8a277034e2 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeLogMessages-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-observeLogMessages-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0", + "observeLogMessages": 0 + } + } + ], + "tests": [ + { + "description": "foo", 
+ "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-poolClearedEvent-interruptInUseConnections-type.json b/test/unified-test-format/invalid/expectedCmapEvent-poolClearedEvent-interruptInUseConnections-type.json new file mode 100644 index 0000000000..de59318822 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-poolClearedEvent-interruptInUseConnections-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-poolClearedEvent-interruptInUseConnections-type", + "schemaVersion": "1.11", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "poolClearedEvent": { + "interruptInUseConnections": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-databaseName-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-databaseName-type.json new file mode 100644 index 0000000000..f6a305b89a --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-databaseName-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandFailedEvent-databaseName-type", + "schemaVersion": "1.15", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandFailedEvent": { + "databaseName": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-databaseName-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-databaseName-type.json new file mode 100644 index 0000000000..47b8c8bb9d --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-databaseName-type.json @@ -0,0 +1,29 @@ +{ + "description": 
"expectedCommandEvent-commandSucceededEvent-databaseName-type", + "schemaVersion": "1.15", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandSucceededEvent": { + "databaseName": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorResponse-type.json b/test/unified-test-format/invalid/expectedError-errorResponse-type.json new file mode 100644 index 0000000000..6eb66d9b0b --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorResponse-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorResponse-type", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorResponse": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-additionalProperties.json b/test/unified-test-format/invalid/expectedLogMessage-additionalProperties.json new file mode 100644 index 0000000000..cd7cf8726c --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-additionalProperties.json @@ -0,0 +1,24 @@ +{ + "description": "expectedLogMessage-additionalProperties", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [], + "foo": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-component-enum.json b/test/unified-test-format/invalid/expectedLogMessage-component-enum.json new file mode 100644 index 0000000000..2283e9b243 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-component-enum.json @@ -0,0 +1,29 @@ +{ + "description": 
"expectedLogMessage-component-enum", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": "debug", + "component": "foo", + "data": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-component-required.json b/test/unified-test-format/invalid/expectedLogMessage-component-required.json new file mode 100644 index 0000000000..f3a157787f --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-component-required.json @@ -0,0 +1,28 @@ +{ + "description": "expectedLogMessage-component-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": "debug", + "data": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-component-type.json b/test/unified-test-format/invalid/expectedLogMessage-component-type.json new file mode 100644 index 0000000000..af8f711573 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-component-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedLogMessage-component-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": "debug", + "component": 0, + "data": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-data-required.json b/test/unified-test-format/invalid/expectedLogMessage-data-required.json new file mode 100644 index 0000000000..7e8152dddd --- /dev/null +++ 
b/test/unified-test-format/invalid/expectedLogMessage-data-required.json @@ -0,0 +1,28 @@ +{ + "description": "expectedLogMessage-data-required", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": "debug", + "component": "command" + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-data-type.json b/test/unified-test-format/invalid/expectedLogMessage-data-type.json new file mode 100644 index 0000000000..4f81fb6272 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-data-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedLogMessage-data-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": "debug", + "component": "command", + "data": 0 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-failureIsRedacted-type.json b/test/unified-test-format/invalid/expectedLogMessage-failureIsRedacted-type.json new file mode 100644 index 0000000000..190748a185 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-failureIsRedacted-type.json @@ -0,0 +1,30 @@ +{ + "description": "expectedLogMessage-failureIsRedacted-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": "debug", + "component": "command", + "failureIsRedacted": 0, + "data": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-level-enum.json 
b/test/unified-test-format/invalid/expectedLogMessage-level-enum.json new file mode 100644 index 0000000000..f4c886bb68 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-level-enum.json @@ -0,0 +1,29 @@ +{ + "description": "expectedLogMessage-level-enum", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": "foo", + "component": "command", + "data": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-level-required.json b/test/unified-test-format/invalid/expectedLogMessage-level-required.json new file mode 100644 index 0000000000..27c9c7a6cd --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-level-required.json @@ -0,0 +1,28 @@ +{ + "description": "expectedLogMessage-level-required", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "component": "command", + "data": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-level-type.json b/test/unified-test-format/invalid/expectedLogMessage-level-type.json new file mode 100644 index 0000000000..180d7afcd6 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-level-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedLogMessage-level-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": 0, + "component": "command", + "data": {} + } + ] + } + ] + } + ] +} diff --git 
a/test/unified-test-format/invalid/expectedLogMessagesForClient-additionalProperties.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-additionalProperties.json new file mode 100644 index 0000000000..306b78b446 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-additionalProperties.json @@ -0,0 +1,24 @@ +{ + "description": "expectedLogMessagesForClient-additionalProperties", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [], + "foo": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-client-required.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-client-required.json new file mode 100644 index 0000000000..d8e1100bea --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-client-required.json @@ -0,0 +1,22 @@ +{ + "description": "expectedLogMessagesForClient-client-required", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "messages": [] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-client-type.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-client-type.json new file mode 100644 index 0000000000..5399cac029 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-client-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedEventsForClient-client-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": 0, + "messages": [] + } + ] + } + ] +} diff --git 
a/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreExtraMessages-type.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreExtraMessages-type.json new file mode 100644 index 0000000000..a9f2da9bce --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreExtraMessages-type.json @@ -0,0 +1,24 @@ +{ + "description": "expectedLogMessagesForClient-ignoreExtraMessages-type", + "schemaVersion": "1.16", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "ignoreExtraMessages": "true", + "messages": [] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreMessages-items.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreMessages-items.json new file mode 100644 index 0000000000..345faf41f5 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreMessages-items.json @@ -0,0 +1,26 @@ +{ + "description": "expectedLogMessagesForClient-ignoreMessages-items", + "schemaVersion": "1.16", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [], + "ignoreMessages": [ + 0 + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreMessages-type.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreMessages-type.json new file mode 100644 index 0000000000..4bc2d41dbf --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreMessages-type.json @@ -0,0 +1,24 @@ +{ + "description": "expectedLogMessagesForClient-ignoreMessages-type", + "schemaVersion": "1.16", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + 
"description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [], + "ignoreMessages": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-items.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-items.json new file mode 100644 index 0000000000..9788d8fe5c --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-items.json @@ -0,0 +1,25 @@ +{ + "description": "expectedLogMessagesForClient-messages-items", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + 0 + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-required.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-required.json new file mode 100644 index 0000000000..85d070672f --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-required.json @@ -0,0 +1,22 @@ +{ + "description": "expectedLogMessagesForClient-messages-required", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0" + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-type.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-type.json new file mode 100644 index 0000000000..27531667c5 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedLogMessagesForClient-messages-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ 
+ { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-topologyDescriptionChangedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedSdamEvent-topologyDescriptionChangedEvent-additionalProperties.json new file mode 100644 index 0000000000..ef2686e93f --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-topologyDescriptionChangedEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedSdamEvent-topologyDescriptionChangedEvent-additionalProperties", + "schemaVersion": "1.14", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "topologyDescriptionChangedEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-authMechanism-type.json b/test/unified-test-format/invalid/runOnRequirement-authMechanism-type.json new file mode 100644 index 0000000000..b97654a743 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-authMechanism-type.json @@ -0,0 +1,17 @@ +{ + "description": "runOnRequirement-authMechanism-type", + "schemaVersion": "1.19", + "runOnRequirements": [ + { + "authMechanism": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + + ] + } + ] +} diff --git a/test/unified-test-format/invalid/test-expectLogMessages-items.json b/test/unified-test-format/invalid/test-expectLogMessages-items.json new file mode 100644 index 0000000000..be4a609c56 --- /dev/null +++ b/test/unified-test-format/invalid/test-expectLogMessages-items.json @@ -0,0 +1,13 @@ +{ + "description": "test-expectLogMessages-items", + "schemaVersion": "1.13", + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + 0 + ] + } + ] +} diff --git 
a/test/unified-test-format/invalid/test-expectLogMessages-minItems.json b/test/unified-test-format/invalid/test-expectLogMessages-minItems.json new file mode 100644 index 0000000000..d7a07c2e77 --- /dev/null +++ b/test/unified-test-format/invalid/test-expectLogMessages-minItems.json @@ -0,0 +1,11 @@ +{ + "description": "test-expectLogMessages-minItems", + "schemaVersion": "1.11", + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [] + } + ] +} diff --git a/test/unified-test-format/invalid/test-expectLogMessages-type.json b/test/unified-test-format/invalid/test-expectLogMessages-type.json new file mode 100644 index 0000000000..9a8d6fcdfb --- /dev/null +++ b/test/unified-test-format/invalid/test-expectLogMessages-type.json @@ -0,0 +1,11 @@ +{ + "description": "test-expectLogMessages-type", + "schemaVersion": "1.13", + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": 0 + } + ] +} diff --git a/test/unified-test-format/valid-pass/entity-commandCursor.json b/test/unified-test-format/valid-pass/entity-commandCursor.json new file mode 100644 index 0000000000..72b74b4a9a --- /dev/null +++ b/test/unified-test-format/valid-pass/entity-commandCursor.json @@ -0,0 +1,278 @@ +{ + "description": "entity-commandCursor", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection", + "database": "db", + "collectionName": "collection" + } + } + ], + "initialData": [ + { + "collectionName": "collection", + "databaseName": "db", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "runCursorCommand creates and exhausts 
cursor by running getMores", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 2, + "command": { + "find": "collection", + "filter": {}, + "batchSize": 2 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "filter": {}, + "batchSize": 2, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "createCommandCursor creates a cursor and stores it as an entity that can be iterated one document at a time", + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 2, + "command": { + "find": "collection", + "filter": {}, + "batchSize": 2 + } + }, + "saveResultAsEntity": "myRunCommandCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 1, + "x": 11 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 2, + "x": 22 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 3, + "x": 33 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": 
"myRunCommandCursor", + "expectResult": { + "_id": 4, + "x": 44 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 5, + "x": 55 + } + } + ] + }, + { + "description": "createCommandCursor's cursor can be closed and will perform a killCursors operation", + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 2, + "command": { + "find": "collection", + "filter": {}, + "batchSize": 2 + } + }, + "saveResultAsEntity": "myRunCommandCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 1, + "x": 11 + } + }, + { + "name": "close", + "object": "myRunCommandCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "filter": {}, + "batchSize": 2, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "collection", + "cursors": { + "$$type": "array" + } + }, + "commandName": "killCursors" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json b/test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json index 88fc28e34e..b17ae78b94 100644 --- a/test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json +++ b/test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json @@ -93,7 +93,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "coll0" }, diff --git a/test/unified-test-format/valid-pass/entity-find-cursor.json b/test/unified-test-format/valid-pass/entity-find-cursor.json index 85b8f69d7f..6f955d81f4 100644 --- a/test/unified-test-format/valid-pass/entity-find-cursor.json +++ b/test/unified-test-format/valid-pass/entity-find-cursor.json 
@@ -109,7 +109,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "ns": { "$$type": "string" @@ -126,7 +129,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "coll0" }, @@ -138,7 +144,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "ns": { "$$type": "string" diff --git a/test/unified-test-format/valid-pass/expectedError-errorResponse.json b/test/unified-test-format/valid-pass/expectedError-errorResponse.json new file mode 100644 index 0000000000..177b1baf56 --- /dev/null +++ b/test/unified-test-format/valid-pass/expectedError-errorResponse.json @@ -0,0 +1,70 @@ +{ + "description": "expectedError-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "tests": [ + { + "description": "Unsupported command", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "unsupportedCommand", + "command": { + "unsupportedCommand": 1 + } + }, + "expectError": { + "errorResponse": { + "errmsg": { + "$$type": "string" + } + } + } + } + ] + }, + { + "description": "Unsupported query operator", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "$unsupportedQueryOperator": 1 + } + }, + "expectError": { + "errorResponse": { + "errmsg": { + "$$type": "string" + } + } + } + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/expectedEventsForClient-topologyDescriptionChangedEvent.json b/test/unified-test-format/valid-pass/expectedEventsForClient-topologyDescriptionChangedEvent.json new file mode 100644 index 0000000000..cf7bd60826 --- /dev/null 
+++ b/test/unified-test-format/valid-pass/expectedEventsForClient-topologyDescriptionChangedEvent.json @@ -0,0 +1,68 @@ +{ + "description": "expectedEventsForClient-topologyDescriptionChangedEvent", + "schemaVersion": "1.20", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ], + "minServerVersion": "4.4" + } + ], + "tests": [ + { + "description": "can assert on values of newDescription and previousDescription fields", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "directConnection": true + }, + "observeEvents": [ + "topologyDescriptionChangedEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": true, + "events": [ + { + "topologyDescriptionChangedEvent": { + "previousDescription": { + "type": "Unknown" + }, + "newDescription": { + "type": "Single" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-crud.json b/test/unified-test-format/valid-pass/poc-crud.json index 0790d9b789..94e4ec5682 100644 --- a/test/unified-test-format/valid-pass/poc-crud.json +++ b/test/unified-test-format/valid-pass/poc-crud.json @@ -322,7 +322,7 @@ "minServerVersion": "4.1.0", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ], "serverless": "forbid" } diff --git a/test/unified-test-format/valid-pass/poc-retryable-writes.json b/test/unified-test-format/valid-pass/poc-retryable-writes.json index 50160799f3..f19aa3f9d8 100644 --- a/test/unified-test-format/valid-pass/poc-retryable-writes.json +++ b/test/unified-test-format/valid-pass/poc-retryable-writes.json @@ -1,14 +1,6 @@ { "description": "poc-retryable-writes", "schemaVersion": "1.0", - "runOnRequirements": [ 
- { - "minServerVersion": "3.6", - "topologies": [ - "replicaset" - ] - } - ], "createEntities": [ { "client": { @@ -79,6 +71,14 @@ "tests": [ { "description": "FindOneAndUpdate is committed on first attempt", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], "operations": [ { "name": "failPoint", @@ -132,6 +132,14 @@ }, { "description": "FindOneAndUpdate is not committed on first attempt", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], "operations": [ { "name": "failPoint", @@ -188,6 +196,14 @@ }, { "description": "FindOneAndUpdate is never committed", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], "operations": [ { "name": "failPoint", @@ -245,15 +261,10 @@ "description": "InsertMany succeeds after PrimarySteppedDown", "runOnRequirements": [ { - "minServerVersion": "4.0", - "topologies": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", + "minServerVersion": "4.3.1", "topologies": [ - "sharded-replicaset" + "replicaset", + "sharded" ] } ], @@ -345,7 +356,7 @@ { "minServerVersion": "4.1.7", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], @@ -406,15 +417,10 @@ "description": "InsertOne fails after multiple retryable writeConcernErrors", "runOnRequirements": [ { - "minServerVersion": "4.0", - "topologies": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", + "minServerVersion": "4.3.1", "topologies": [ - "sharded-replicaset" + "replicaset", + "sharded" ] } ], @@ -433,6 +439,9 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, "errmsg": "Replication is being shut down" diff --git a/test/unified-test-format/valid-pass/poc-sessions.json b/test/unified-test-format/valid-pass/poc-sessions.json index 75f3489428..117c9e7d00 100644 --- a/test/unified-test-format/valid-pass/poc-sessions.json +++ 
b/test/unified-test-format/valid-pass/poc-sessions.json @@ -264,7 +264,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/unified-test-format/valid-pass/poc-transactions-convenient-api.json b/test/unified-test-format/valid-pass/poc-transactions-convenient-api.json index 820ed65927..9ab44a9c54 100644 --- a/test/unified-test-format/valid-pass/poc-transactions-convenient-api.json +++ b/test/unified-test-format/valid-pass/poc-transactions-convenient-api.json @@ -11,7 +11,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json b/test/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json index a0b297d59a..de08edec44 100644 --- a/test/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json +++ b/test/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json @@ -5,7 +5,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/unified-test-format/valid-pass/poc-transactions.json b/test/unified-test-format/valid-pass/poc-transactions.json index 0355ca2060..2055a3b705 100644 --- a/test/unified-test-format/valid-pass/poc-transactions.json +++ b/test/unified-test-format/valid-pass/poc-transactions.json @@ -11,7 +11,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], @@ -93,7 +93,7 @@ "minServerVersion": "4.3.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -203,7 +203,7 @@ "minServerVersion": "4.3.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/unified_format.py b/test/unified_format.py index 68ce36e6fa..3f98b571bb 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -29,9 +29,10 @@ import time import traceback import types -from collections import abc +from collections 
import abc, defaultdict from test import ( AWS_CREDS, + AWS_CREDS_2, AZURE_CREDS, CA_PEM, CLIENT_PEM, @@ -57,7 +58,7 @@ ) from test.utils_spec_runner import SpecRunnerThread from test.version import Version -from typing import Any, Dict, List, Mapping, Optional +from typing import Any, Dict, List, Mapping, Optional, Union import pymongo from bson import SON, Code, DBRef, Decimal128, Int64, MaxKey, MinKey, json_util @@ -81,6 +82,7 @@ EncryptionError, InvalidOperation, NotPrimaryError, + OperationFailure, PyMongoError, ) from pymongo.monitoring import ( @@ -108,7 +110,11 @@ ServerHeartbeatSucceededEvent, ServerListener, ServerOpeningEvent, + TopologyClosedEvent, + TopologyDescriptionChangedEvent, TopologyEvent, + TopologyListener, + TopologyOpenedEvent, _CommandEvent, _ConnectionEvent, _PoolEvent, @@ -139,19 +145,39 @@ } -# Build up a placeholder map. +# Build up a placeholder maps. PLACEHOLDER_MAP = {} for provider_name, provider_data in [ ("local", {"key": LOCAL_MASTER_KEY}), + ("local:name1", {"key": LOCAL_MASTER_KEY}), ("aws", AWS_CREDS), + ("aws:name1", AWS_CREDS), + ("aws:name2", AWS_CREDS_2), ("azure", AZURE_CREDS), + ("azure:name1", AZURE_CREDS), ("gcp", GCP_CREDS), + ("gcp:name1", GCP_CREDS), ("kmip", KMIP_CREDS), + ("kmip:name1", KMIP_CREDS), ]: for key, value in provider_data.items(): placeholder = f"/clientEncryptionOpts/kmsProviders/{provider_name}/{key}" PLACEHOLDER_MAP[placeholder] = value +OIDC_ENV = os.environ.get("OIDC_ENV", "test") +if OIDC_ENV == "test": + PLACEHOLDER_MAP["/uriOptions/authMechanismProperties"] = {"ENVIRONMENT": "test"} +elif OIDC_ENV == "azure": + PLACEHOLDER_MAP["/uriOptions/authMechanismProperties"] = { + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": os.environ["AZUREOIDC_RESOURCE"], + } +elif OIDC_ENV == "gcp": + PLACEHOLDER_MAP["/uriOptions/authMechanismProperties"] = { + "ENVIRONMENT": "gcp", + "TOKEN_RESOURCE": os.environ["GCPOIDC_AUDIENCE"], + } + def interrupt_loop(): global IS_INTERRUPTED @@ -169,7 +195,7 @@ def 
with_metaclass(meta, *bases): # the actual metaclass. class metaclass(type): def __new__(cls, name, this_bases, d): - if sys.version_info[:2] >= (3, 7): + if sys.version_info[:2] >= (3, 7): # noqa: UP036 # This version introduced PEP 560 that requires a bit # of extra care (we mimic what is done by __build_class__). resolved_bases = types.resolve_bases(bases) @@ -226,6 +252,8 @@ def is_run_on_requirement_satisfied(requirement): if req_auth is not None: if req_auth: auth_satisfied = client_context.auth_enabled + if auth_satisfied and "authMechanism" in requirement: + auth_satisfied = client_context.check_auth_type(requirement["authMechanism"]) else: auth_satisfied = not client_context.auth_enabled @@ -295,7 +323,9 @@ def close(self): self.client = None -class EventListenerUtil(CMAPListener, CommandListener, ServerListener, ServerHeartbeatListener): +class EventListenerUtil( + CMAPListener, CommandListener, ServerListener, ServerHeartbeatListener, TopologyListener +): def __init__( self, observe_events, ignore_commands, observe_sensitive_commands, store_events, entity_map ): @@ -377,13 +407,18 @@ def failed(self, event): else: self.add_event(event) - def opened(self, event: ServerOpeningEvent) -> None: + def opened(self, event: Union[ServerOpeningEvent, TopologyOpenedEvent]) -> None: self.add_event(event) - def description_changed(self, event: ServerDescriptionChangedEvent) -> None: + def description_changed( + self, event: Union[ServerDescriptionChangedEvent, TopologyDescriptionChangedEvent] + ) -> None: self.add_event(event) - def closed(self, event: ServerClosedEvent) -> None: + def topology_changed(self, event: TopologyDescriptionChangedEvent) -> None: + self.add_event(event) + + def closed(self, event: Union[ServerClosedEvent, TopologyClosedEvent]) -> None: self.add_event(event) @@ -397,6 +432,7 @@ def __init__(self, test_class): self._listeners: Dict[str, EventListenerUtil] = {} self._session_lsids: Dict[str, Mapping[str, Any]] = {} self.test: 
UnifiedSpecTestMixinV1 = test_class + self._cluster_time: Mapping[str, Any] = {} def __contains__(self, item): return item in self._entities @@ -460,6 +496,10 @@ def _create_entity(self, entity_spec, uri=None): kwargs["h"] = client_context.mongos_seeds() kwargs.update(spec.get("uriOptions", {})) server_api = spec.get("serverApi") + if "waitQueueSize" in kwargs: + raise unittest.SkipTest("PyMongo does not support waitQueueSize") + if "waitQueueMultiple" in kwargs: + raise unittest.SkipTest("PyMongo does not support waitQueueMultiple") if server_api: kwargs["server_api"] = ServerApi( server_api["version"], @@ -532,12 +572,18 @@ def drop(self: GridFSBucket, *args: Any, **kwargs: Any) -> None: opts = camel_to_snake_args(spec["clientEncryptionOpts"].copy()) if isinstance(opts["key_vault_client"], str): opts["key_vault_client"] = self[opts["key_vault_client"]] + # Set TLS options for providers like "kmip:name1". + kms_tls_options = {} + for provider in opts["kms_providers"]: + provider_type = provider.split(":")[0] + if provider_type in KMS_TLS_OPTS: + kms_tls_options[provider] = KMS_TLS_OPTS[provider_type] self[spec["id"]] = ClientEncryption( opts["kms_providers"], opts["key_vault_namespace"], opts["key_vault_client"], DEFAULT_CODEC_OPTIONS, - opts.get("kms_tls_options", KMS_TLS_OPTS), + opts.get("kms_tls_options", kms_tls_options), ) return elif entity_type == "thread": @@ -579,6 +625,14 @@ def get_lsid_for_session(self, session_name): # session has been closed. 
return self._session_lsids[session_name] + def advance_cluster_times(self) -> None: + """Manually synchronize entities when desired""" + if not self._cluster_time: + self._cluster_time = self.test.client.admin.command("ping").get("$clusterTime") + for entity in self._entities.values(): + if isinstance(entity, ClientSession) and self._cluster_time: + entity.advance_cluster_time(self._cluster_time) + binary_types = (Binary, bytes) long_types = (Int64,) @@ -677,6 +731,12 @@ def _operation_lte(self, spec, actual, key_to_compare): self.test.fail(f"Actual command is missing the {key_to_compare} field: {spec}") self.test.assertLessEqual(actual[key_to_compare], spec) + def _operation_matchAsDocument(self, spec, actual, key_to_compare): + self._match_document(spec, json_util.loads(actual[key_to_compare]), False) + + def _operation_matchAsRoot(self, spec, actual, key_to_compare): + self._match_document(spec, actual, True) + def _evaluate_special_operation(self, opname, spec, actual, key_to_compare): method_name = "_operation_{}".format(opname.strip("$")) try: @@ -767,64 +827,79 @@ def match_result(self, expectation, actual, in_recursive_call=False): self.test.assertEqual(expectation, actual) return None - def assertHasDatabaseName(self, spec, actual): - if "databaseName" in spec: - self.test.assertEqual(spec["databaseName"], actual.database_name) + def match_server_description(self, actual: ServerDescription, spec: dict) -> None: + for field, expected in spec.items(): + field = camel_to_snake(field) + if field == "type": + field = "server_type_name" + self.test.assertEqual(getattr(actual, field), expected) + + def match_topology_description(self, actual: TopologyDescription, spec: dict) -> None: + for field, expected in spec.items(): + field = camel_to_snake(field) + if field == "type": + field = "topology_type_name" + self.test.assertEqual(getattr(actual, field), expected) + + def match_event_fields(self, actual: Any, spec: dict) -> None: + for field, expected in 
spec.items(): + if field == "command" and isinstance(actual, CommandStartedEvent): + command = spec["command"] + if command: + self.match_result(command, actual.command) + continue + if field == "reply" and isinstance(actual, CommandSucceededEvent): + reply = spec["reply"] + if reply: + self.match_result(reply, actual.reply) + continue + if field == "hasServiceId": + if spec["hasServiceId"]: + self.test.assertIsNotNone(actual.service_id) + self.test.assertIsInstance(actual.service_id, ObjectId) + else: + self.test.assertIsNone(actual.service_id) + continue + if field == "hasServerConnectionId": + if spec["hasServerConnectionId"]: + self.test.assertIsNotNone(actual.server_connection_id) + self.test.assertIsInstance(actual.server_connection_id, int) + else: + self.test.assertIsNone(actual.server_connection_id) + continue + if field in ("previousDescription", "newDescription"): + if isinstance(actual, ServerDescriptionChangedEvent): + self.match_server_description( + getattr(actual, camel_to_snake(field)), spec[field] + ) + continue + if isinstance(actual, TopologyDescriptionChangedEvent): + self.match_topology_description( + getattr(actual, camel_to_snake(field)), spec[field] + ) + continue - def assertHasServiceId(self, spec, actual): - if "hasServiceId" in spec: - if spec.get("hasServiceId"): - self.test.assertIsNotNone(actual.service_id) - self.test.assertIsInstance(actual.service_id, ObjectId) + if field == "interruptInUseConnections": + field = "interrupt_connections" else: - self.test.assertIsNone(actual.service_id) + field = camel_to_snake(field) + self.test.assertEqual(getattr(actual, field), expected) - def match_server_description(self, actual: ServerDescription, spec: dict) -> None: - if "type" in spec: - self.test.assertEqual(actual.server_type_name, spec["type"]) - if "error" in spec: - self.test.process_error(actual.error, spec["error"]) - if "minWireVersion" in spec: - self.test.assertEqual(actual.min_wire_version, spec["minWireVersion"]) - if 
"maxWireVersion" in spec: - self.test.assertEqual(actual.max_wire_version, spec["maxWireVersion"]) - if "topologyVersion" in spec: - self.test.assertEqual(actual.topology_version, spec["topologyVersion"]) - - def match_event(self, event_type, expectation, actual): + def match_event(self, expectation, actual): name, spec = next(iter(expectation.items())) - - # every command event has the commandName field - if event_type == "command": - command_name = spec.get("commandName") - if command_name: - self.test.assertEqual(command_name, actual.command_name) - if name == "commandStartedEvent": self.test.assertIsInstance(actual, CommandStartedEvent) - command = spec.get("command") - if command: - self.match_result(command, actual.command) - self.assertHasDatabaseName(spec, actual) - self.assertHasServiceId(spec, actual) elif name == "commandSucceededEvent": self.test.assertIsInstance(actual, CommandSucceededEvent) - reply = spec.get("reply") - if reply: - self.match_result(reply, actual.reply) - self.assertHasDatabaseName(spec, actual) - self.assertHasServiceId(spec, actual) elif name == "commandFailedEvent": self.test.assertIsInstance(actual, CommandFailedEvent) - self.assertHasServiceId(spec, actual) - self.assertHasDatabaseName(spec, actual) elif name == "poolCreatedEvent": self.test.assertIsInstance(actual, PoolCreatedEvent) elif name == "poolReadyEvent": self.test.assertIsInstance(actual, PoolReadyEvent) elif name == "poolClearedEvent": self.test.assertIsInstance(actual, PoolClearedEvent) - self.assertHasServiceId(spec, actual) + self.test.assertIsInstance(actual.interrupt_connections, bool) elif name == "poolClosedEvent": self.test.assertIsInstance(actual, PoolClosedEvent) elif name == "connectionCreatedEvent": @@ -833,41 +908,29 @@ def match_event(self, event_type, expectation, actual): self.test.assertIsInstance(actual, ConnectionReadyEvent) elif name == "connectionClosedEvent": self.test.assertIsInstance(actual, ConnectionClosedEvent) - if "reason" in spec: - 
self.test.assertEqual(actual.reason, spec["reason"]) elif name == "connectionCheckOutStartedEvent": self.test.assertIsInstance(actual, ConnectionCheckOutStartedEvent) elif name == "connectionCheckOutFailedEvent": self.test.assertIsInstance(actual, ConnectionCheckOutFailedEvent) - if "reason" in spec: - self.test.assertEqual(actual.reason, spec["reason"]) elif name == "connectionCheckedOutEvent": self.test.assertIsInstance(actual, ConnectionCheckedOutEvent) elif name == "connectionCheckedInEvent": self.test.assertIsInstance(actual, ConnectionCheckedInEvent) elif name == "serverDescriptionChangedEvent": self.test.assertIsInstance(actual, ServerDescriptionChangedEvent) - if "previousDescription" in spec: - self.match_server_description( - actual.previous_description, spec["previousDescription"] - ) - if "newDescription" in spec: - self.match_server_description(actual.new_description, spec["newDescription"]) elif name == "serverHeartbeatStartedEvent": self.test.assertIsInstance(actual, ServerHeartbeatStartedEvent) - if "awaited" in spec: - self.test.assertEqual(actual.awaited, spec["awaited"]) elif name == "serverHeartbeatSucceededEvent": self.test.assertIsInstance(actual, ServerHeartbeatSucceededEvent) - if "awaited" in spec: - self.test.assertEqual(actual.awaited, spec["awaited"]) elif name == "serverHeartbeatFailedEvent": self.test.assertIsInstance(actual, ServerHeartbeatFailedEvent) - if "awaited" in spec: - self.test.assertEqual(actual.awaited, spec["awaited"]) + elif name == "topologyDescriptionChangedEvent": + self.test.assertIsInstance(actual, TopologyDescriptionChangedEvent) else: raise Exception(f"Unsupported event type {name}") + self.match_event_fields(actual, spec) + def coerce_result(opname, result): """Convert a pymongo result into the spec's result format.""" @@ -882,11 +945,14 @@ def coerce_result(opname, result): if opname in ("deleteOne", "deleteMany"): return {"deletedCount": result.deleted_count} if opname in ("updateOne", "updateMany", 
"replaceOne"): - return { + value = { "matchedCount": result.matched_count, "modifiedCount": result.modified_count, "upsertedCount": 0 if result.upserted_id is None else 1, } + if result.upserted_id is not None: + value["upsertedId"] = result.upserted_id + return value return result @@ -900,10 +966,11 @@ class UnifiedSpecTestMixinV1(IntegrationTest): a class attribute ``TEST_SPEC``. """ - SCHEMA_VERSION = Version.from_string("1.17") + SCHEMA_VERSION = Version.from_string("1.20") RUN_ON_LOAD_BALANCER = True RUN_ON_SERVERLESS = True TEST_SPEC: Any + mongos_clients: list[MongoClient] = [] @staticmethod def should_run_on(run_on_spec): @@ -948,11 +1015,27 @@ def setUpClass(cls): if not cls.should_run_on(run_on_spec): raise unittest.SkipTest(f"{cls.__name__} runOnRequirements not satisfied") + # Handle mongos_clients for transactions tests. + cls.mongos_clients = [] + if ( + client_context.supports_transactions() + and not client_context.load_balancer + and not client_context.serverless + ): + for address in client_context.mongoses: + cls.mongos_clients.append(single_client("{}:{}".format(*address))) + # add any special-casing for skipping tests here if client_context.storage_engine == "mmapv1": if "retryable-writes" in cls.TEST_SPEC["description"]: raise unittest.SkipTest("MMAPv1 does not support retryWrites=True") + @classmethod + def tearDownClass(cls): + for client in cls.mongos_clients: + client.close() + super().tearDownClass() + def setUp(self): super().setUp() # process schemaVersion @@ -967,6 +1050,7 @@ def setUp(self): # initialize internals self.match_evaluator = MatchEvaluatorUtil(self) + self.IS_SERVERLESS_PROXY = os.environ.get("IS_SERVERLESS_PROXY") def maybe_skip_test(self, spec): # add any special-casing for skipping tests here @@ -977,10 +1061,28 @@ def maybe_skip_test(self, spec): or "Cancel server check" in spec["description"] ): self.skipTest("MMAPv1 does not support retryWrites=True") + if ( + "Database-level aggregate with $out includes read 
preference for 5.0+ server" + in spec["description"] + ): + if client_context.version[0] == 8: + self.skipTest("waiting on PYTHON-4356") + if "Aggregate with $out includes read preference for 5.0+ server" in spec["description"]: + if client_context.version[0] == 8: + self.skipTest("waiting on PYTHON-4356") if "Client side error in command starting transaction" in spec["description"]: self.skipTest("Implement PYTHON-1894") if "timeoutMS applied to entire download" in spec["description"]: self.skipTest("PyMongo's open_download_stream does not cap the stream's lifetime") + if "unpin after non-transient error on abort" in spec["description"]: + if client_context.version[0] == 8: + self.skipTest("Skipping TransientTransactionError pending PYTHON-4182") + if self.IS_SERVERLESS_PROXY is not None and ( + "errors during the initial connection hello are ignored" in spec["description"] + or "pinned connection is released after a transient network commit error" + in spec["description"] + ): + self.skipTest("waiting on CLOUDP-202309") class_name = self.__class__.__name__.lower() description = spec["description"].lower() @@ -1025,6 +1127,8 @@ def maybe_skip_test(self, spec): self.skipTest("PyMongo does not support timeoutMode") def process_error(self, exception, spec): + if isinstance(exception, unittest.SkipTest): + raise is_error = spec.get("isError") is_client_error = spec.get("isClientError") is_timeout_error = spec.get("isTimeoutError") @@ -1036,8 +1140,7 @@ def process_error(self, exception, spec): expect_result = spec.get("expectResult") error_response = spec.get("errorResponse") if error_response: - for k in error_response.keys(): - self.assertEqual(error_response[k], exception.details[k]) + self.match_evaluator.match_result(error_response, exception.details) if is_error: # already satisfied because exception was raised @@ -1091,6 +1194,8 @@ def process_error(self, exception, spec): else: self.fail(f"expectResult can only be specified with {BulkWriteError} exceptions") + 
return exception + def __raise_if_unsupported(self, opname, target, *target_types): if not isinstance(target, target_types): self.fail(f"Operation {opname} not supported for entity of type {type(target)}") @@ -1153,6 +1258,18 @@ def _databaseOperation_createCommandCursor(self, target, **kwargs): return cursor + def kill_all_sessions(self): + if getattr(self, "client", None) is None: + return + clients = self.mongos_clients if self.mongos_clients else [self.client] + for client in clients: + try: + client.admin.command("killAllSessions", []) + except OperationFailure: + # "operation was interrupted" by killing the command's + # own session. + pass + def _databaseOperation_listCollections(self, target, *args, **kwargs): if "batch_size" in kwargs: kwargs["cursor"] = {"batchSize": kwargs.pop("batch_size")} @@ -1241,10 +1358,8 @@ def _cursor_close(self, target, *args, **kwargs): def _clientEncryptionOperation_createDataKey(self, target, *args, **kwargs): if "opts" in kwargs: - opts = kwargs.pop("opts") - kwargs["master_key"] = opts.get("masterKey") - kwargs["key_alt_names"] = opts.get("keyAltNames") - kwargs["key_material"] = opts.get("keyMaterial") + kwargs.update(camel_to_snake_args(kwargs.pop("opts"))) + return target.create_data_key(*args, **kwargs) def _clientEncryptionOperation_getKeys(self, target, *args, **kwargs): @@ -1258,14 +1373,17 @@ def _clientEncryptionOperation_deleteKey(self, target, *args, **kwargs): def _clientEncryptionOperation_rewrapManyDataKey(self, target, *args, **kwargs): if "opts" in kwargs: - opts = kwargs.pop("opts") - kwargs["provider"] = opts.get("provider") - kwargs["master_key"] = opts.get("masterKey") + kwargs.update(camel_to_snake_args(kwargs.pop("opts"))) data = target.rewrap_many_data_key(*args, **kwargs) if data.bulk_write_result: return {"bulkWriteResult": parse_bulk_write_result(data.bulk_write_result)} return {} + def _clientEncryptionOperation_encrypt(self, target, *args, **kwargs): + if "opts" in kwargs: + 
kwargs.update(camel_to_snake_args(kwargs.pop("opts"))) + return target.encrypt(*args, **kwargs) + def _bucketOperation_download(self, target: GridFSBucket, *args: Any, **kwargs: Any) -> bytes: with target.open_download_stream(*args, **kwargs) as gout: return gout.read() @@ -1309,7 +1427,11 @@ def run_entity_operation(self, spec): if opargs: arguments = parse_spec_options(copy.deepcopy(opargs)) prepare_spec_arguments( - spec, arguments, camel_to_snake(opname), self.entity_map, self.run_operations + spec, + arguments, + camel_to_snake(opname), + self.entity_map, + self.run_operations_and_throw, ) else: arguments = {} @@ -1367,7 +1489,7 @@ def run_entity_operation(self, spec): # Ignore all operation errors but to avoid masking bugs don't # ignore things like TypeError and ValueError. if ignore and isinstance(exc, (PyMongoError,)): - return None + return exc if expect_error: return self.process_error(exc, expect_error) raise @@ -1404,8 +1526,9 @@ def _testOperation_targetedFailPoint(self, spec): session = self.entity_map[spec["session"]] if not session._pinned_address: self.fail( - "Cannot use targetedFailPoint operation with unpinned " - "session {}".format(spec["session"]) + "Cannot use targetedFailPoint operation with unpinned " "session {}".format( + spec["session"] + ) ) client = single_client("{}:{}".format(*session._pinned_address)) @@ -1414,6 +1537,7 @@ def _testOperation_targetedFailPoint(self, spec): def _testOperation_createEntities(self, spec): self.entity_map.create_entities_from_spec(spec["entities"], uri=self._uri) + self.entity_map.advance_cluster_times() def _testOperation_assertSessionTransactionState(self, spec): session = self.entity_map[spec["session"]] @@ -1490,7 +1614,7 @@ def _event_count(self, client_name, event): count = 0 for actual in actual_events: try: - self.match_evaluator.match_event("all", event, actual) + self.match_evaluator.match_event(event, actual) except AssertionError: continue else: @@ -1621,6 +1745,15 @@ def 
run_operations(self, spec): else: self.run_entity_operation(op) + def run_operations_and_throw(self, spec): + for op in spec: + if op["object"] == "testRunner": + self.run_special_operation(op) + else: + result = self.run_entity_operation(op) + if isinstance(result, Exception): + raise result + def check_events(self, spec): for event_spec in spec: client_name = event_spec["client"] @@ -1638,10 +1771,17 @@ def check_events(self, spec): self.assertEqual(actual_events, []) continue - self.assertEqual(len(actual_events), len(events), actual_events) + if len(actual_events) != len(events): + expected = "\n".join(str(e) for e in events) + actual = "\n".join(str(a) for a in actual_events) + self.assertEqual( + len(actual_events), + len(events), + f"expected events:\n{expected}\nactual events:\n{actual}", + ) for idx, expected_event in enumerate(events): - self.match_evaluator.match_event(event_type, expected_event, actual_events[idx]) + self.match_evaluator.match_event(expected_event, actual_events[idx]) if has_server_connection_id: assert server_connection_id is not None @@ -1649,6 +1789,52 @@ def check_events(self, spec): else: assert server_connection_id is None + def check_log_messages(self, operations, spec): + def format_logs(log_list): + client_to_log = defaultdict(list) + for log in log_list: + if log.module == "ocsp_support": + continue + data = json_util.loads(log.message) + client = data.pop("clientId") + client_to_log[client].append( + { + "level": log.levelname.lower(), + "component": log.name.replace("pymongo.", "", 1), + "data": data, + } + ) + return client_to_log + + with self.assertLogs("pymongo", level="DEBUG") as cm: + self.run_operations(operations) + formatted_logs = format_logs(cm.records) + for client in spec: + components = set() + for message in client["messages"]: + components.add(message["component"]) + + clientid = self.entity_map[client["client"]]._topology_settings._topology_id + actual_logs = formatted_logs[clientid] + actual_logs = [log for 
log in actual_logs if log["component"] in components] + if client.get("ignoreExtraMessages", False): + actual_logs = actual_logs[: len(client["messages"])] + self.assertEqual(len(client["messages"]), len(actual_logs)) + for expected_msg, actual_msg in zip(client["messages"], actual_logs): + expected_data, actual_data = expected_msg.pop("data"), actual_msg.pop("data") + + if "failureIsRedacted" in expected_msg: + self.assertIn("failure", actual_data) + should_redact = expected_msg.pop("failureIsRedacted") + if should_redact: + actual_fields = set(json_util.loads(actual_data["failure"]).keys()) + self.assertTrue( + {"code", "codeName", "errorLabels"}.issuperset(actual_fields) + ) + + self.match_evaluator.match_result(expected_data, actual_data) + self.match_evaluator.match_result(expected_msg, actual_msg) + def verify_outcome(self, spec): for collection_data in spec: coll_name = collection_data["collectionName"] @@ -1667,6 +1853,12 @@ def verify_outcome(self, spec): self.assertListEqual(sorted_expected_documents, actual_documents) def run_scenario(self, spec, uri=None): + # Kill all sessions before and after each test to prevent an open + # transaction (from a test failure) from blocking collection/database + # operations during test set up and tear down. + self.kill_all_sessions() + self.addCleanup(self.kill_all_sessions) + if "csot" in self.id().lower(): # Retry CSOT tests up to 2 times to deal with flakey tests. 
attempts = 3 @@ -1707,10 +1899,18 @@ def _run_scenario(self, spec, uri=None): self.entity_map = EntityMapUtil(self) self.entity_map.create_entities_from_spec(self.TEST_SPEC.get("createEntities", []), uri=uri) # process initialData - self.insert_initial_data(self.TEST_SPEC.get("initialData", [])) - - # process operations - self.run_operations(spec["operations"]) + if "initialData" in self.TEST_SPEC: + self.insert_initial_data(self.TEST_SPEC["initialData"]) + self._cluster_time = self.client.admin.command("ping").get("$clusterTime") + self.entity_map.advance_cluster_times() + + if "expectLogMessages" in spec: + expect_log_messages = spec["expectLogMessages"] + self.assertTrue(expect_log_messages, "expectEvents must be non-empty") + self.check_log_messages(spec["operations"], expect_log_messages) + else: + # process operations + self.run_operations(spec["operations"]) # process expectEvents if "expectEvents" in spec: diff --git a/test/utils.py b/test/utils.py index c8f9197c64..15480dc440 100644 --- a/test/utils.py +++ b/test/utils.py @@ -39,9 +39,9 @@ from pymongo.cursor import CursorType from pymongo.errors import ConfigurationError, OperationFailure from pymongo.hello import HelloCompat +from pymongo.helpers import _SENSITIVE_COMMANDS from pymongo.lock import _create_lock from pymongo.monitoring import ( - _SENSITIVE_COMMANDS, ConnectionCheckedInEvent, ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, @@ -54,6 +54,7 @@ PoolCreatedEvent, PoolReadyEvent, ) +from pymongo.operations import _Op from pymongo.pool import _CancellationContext, _PoolGeneration from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference @@ -280,6 +281,26 @@ def failed(self, event): self.add_event(event) +class HeartbeatEventsListListener(HeartbeatEventListener): + """Listens to only server heartbeat events and publishes them to a provided list.""" + + def __init__(self, events): + super().__init__() + self.event_list = events + + def started(self, 
event): + self.add_event(event) + self.event_list.append("serverHeartbeatStartedEvent") + + def succeeded(self, event): + self.add_event(event) + self.event_list.append("serverHeartbeatSucceededEvent") + + def failed(self, event): + self.add_event(event) + self.event_list.append("serverHeartbeatFailedEvent") + + class MockConnection: def __init__(self): self.cancel_context = _CancellationContext() @@ -296,7 +317,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): class MockPool: - def __init__(self, address, options, handshake=True): + def __init__(self, address, options, handshake=True, client_id=None): self.gen = _PoolGeneration() self._lock = _create_lock() self.opts = options @@ -319,7 +340,7 @@ def _reset(self, service_id=None): def ready(self): pass - def reset(self, service_id=None): + def reset(self, service_id=None, interrupt_connections=False): self._reset() def reset_without_pause(self): @@ -558,7 +579,8 @@ def _mongo_client(host, port, authenticate=True, directConnection=None, **kwargs client_options.update(kwargs) uri = _connection_string(host) - if client_context.auth_enabled and authenticate: + auth_mech = kwargs.get("authMechanism", "") + if client_context.auth_enabled and authenticate and auth_mech != "MONGODB-OIDC": # Only add the default username or password if one is not provided. 
res = parse_uri(uri) if ( @@ -569,7 +591,6 @@ def _mongo_client(host, port, authenticate=True, directConnection=None, **kwargs ): client_options["username"] = db_user client_options["password"] = db_pwd - return MongoClient(uri, port, **client_options) @@ -867,13 +888,16 @@ def stop(self): def get_pool(client): """Get the standalone, primary, or mongos pool.""" topology = client._get_topology() - server = topology.select_server(writable_server_selector) + server = topology.select_server(writable_server_selector, _Op.TEST) return server.pool def get_pools(client): """Get all pools.""" - return [server.pool for server in client._get_topology().select_servers(any_server_selector)] + return [ + server.pool + for server in client._get_topology().select_servers(any_server_selector, _Op.TEST) + ] # Constants for run_threads and lazy_client_trial. @@ -938,7 +962,7 @@ def gevent_monkey_patched(): try: import socket - import gevent.socket + import gevent.socket # type:ignore[import] return socket.socket is gevent.socket.socket except ImportError: @@ -991,7 +1015,7 @@ def parse_read_preference(pref): mode_string = mode_string[:1].lower() + mode_string[1:] mode = read_preferences.read_pref_mode_from_name(mode_string) max_staleness = pref.get("maxStalenessSeconds", -1) - tag_sets = pref.get("tag_sets") + tag_sets = pref.get("tagSets") or pref.get("tag_sets") return read_preferences.make_read_preference( mode, tag_sets=tag_sets, max_staleness=max_staleness ) @@ -1029,7 +1053,12 @@ def parse_spec_options(opts): opts["read_preference"] = parse_read_preference(opts.pop("readPreference")) if "writeConcern" in opts: - opts["write_concern"] = WriteConcern(**dict(opts.pop("writeConcern"))) + w_opts = opts.pop("writeConcern") + if "journal" in w_opts: + w_opts["j"] = w_opts.pop("journal") + if "wtimeoutMS" in w_opts: + w_opts["wtimeout"] = w_opts.pop("wtimeoutMS") + opts["write_concern"] = WriteConcern(**dict(w_opts)) if "readConcern" in opts: opts["read_concern"] = 
ReadConcern(**dict(opts.pop("readConcern"))) @@ -1153,3 +1182,9 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callbac raise AssertionError(f"Unsupported cursorType: {cursor_type}") else: arguments[c2s] = arguments.pop(arg_name) + + +def set_fail_point(client, command_args): + cmd = SON([("configureFailPoint", "failCommand")]) + cmd.update(command_args) + client.admin.command(cmd) diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index 7952a2862d..2b684bb0f1 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -19,6 +19,8 @@ import os import sys +from pymongo.operations import _Op + sys.path[0:0] = [""] from test import unittest @@ -178,7 +180,7 @@ def run_scenario(self): with self.assertRaises((ConfigurationError, ValueError)): # Error can be raised when making Read Pref or selecting. pref = parse_read_preference(pref_def) - top_latency.select_server(pref) + top_latency.select_server(pref, _Op.TEST) return pref = parse_read_preference(pref_def) @@ -186,18 +188,18 @@ def run_scenario(self): # Select servers. 
if not scenario_def.get("suitable_servers"): with self.assertRaises(AutoReconnect): - top_suitable.select_server(pref, server_selection_timeout=0) + top_suitable.select_server(pref, _Op.TEST, server_selection_timeout=0) return if not scenario_def["in_latency_window"]: with self.assertRaises(AutoReconnect): - top_latency.select_server(pref, server_selection_timeout=0) + top_latency.select_server(pref, _Op.TEST, server_selection_timeout=0) return - actual_suitable_s = top_suitable.select_servers(pref, server_selection_timeout=0) - actual_latency_s = top_latency.select_servers(pref, server_selection_timeout=0) + actual_suitable_s = top_suitable.select_servers(pref, _Op.TEST, server_selection_timeout=0) + actual_latency_s = top_latency.select_servers(pref, _Op.TEST, server_selection_timeout=0) expected_suitable_servers = {} for server in scenario_def["suitable_servers"]: diff --git a/tools/clean.py b/tools/clean.py index 15db9a411b..b6e1867a0a 100644 --- a/tools/clean.py +++ b/tools/clean.py @@ -41,7 +41,7 @@ pass try: - from bson import _cbson # noqa: F401 + from bson import _cbson # type: ignore[attr-defined] # noqa: F401 sys.exit("could still import _cbson") except ImportError: diff --git a/tools/compare_import_time.py b/tools/compare_import_time.py new file mode 100644 index 0000000000..fdc344f2e9 --- /dev/null +++ b/tools/compare_import_time.py @@ -0,0 +1,37 @@ +# Copyright 2024-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import sys + +base_sha = sys.argv[-1] +head_sha = sys.argv[-2] + + +def get_total_time(sha: str) -> int: + with open(f"pymongo-{sha}.log") as fid: + last_line = fid.readlines()[-1] + return int(last_line.split()[4]) + + +base_time = get_total_time(base_sha) +curr_time = get_total_time(head_sha) + +# Check if we got 20% or more slower. +change = int((curr_time - base_time) / base_time * 100) +if change > 20: + print(f"PyMongo import got {change} percent worse") + sys.exit(1) + +print(f"Import time changed by {change} percent") diff --git a/tools/ensure_future_annotations_import.py b/tools/ensure_future_annotations_import.py index 3e7e60bfd4..55080148e4 100644 --- a/tools/ensure_future_annotations_import.py +++ b/tools/ensure_future_annotations_import.py @@ -35,7 +35,7 @@ missing.append(path) if missing: - print(f"Missing '{pattern}' import in:") # noqa: T201 + print(f"Missing '{pattern}' import in:") for item in missing: - print(item) # noqa: T201 + print(item) sys.exit(1) diff --git a/tools/fail_if_no_c.py b/tools/fail_if_no_c.py index 2b59521c7d..6848e155aa 100644 --- a/tools/fail_if_no_c.py +++ b/tools/fail_if_no_c.py @@ -29,13 +29,21 @@ import pymongo # noqa: E402 if not pymongo.has_c() or not bson.has_c(): + try: + from pymongo import _cmessage # type:ignore[attr-defined] # noqa: F401 + except Exception as e: + print(e) + try: + from bson import _cbson # type:ignore[attr-defined] # noqa: F401 + except Exception as e: + print(e) sys.exit("could not load C extensions") if os.environ.get("ENSURE_UNIVERSAL2") == "1": parent_dir = Path(pymongo.__path__[0]).parent for pkg in ["pymongo", "bson", "grifs"]: for so_file in Path(f"{parent_dir}/{pkg}").glob("*.so"): - print(f"Checking universal2 compatibility in {so_file}...") # noqa: T201 + print(f"Checking universal2 compatibility in {so_file}...") output = subprocess.check_output(["file", so_file]) # noqa: S603, S607 if "arm64" not in output.decode("utf-8"): sys.exit("Universal 
wheel was not compiled with arm64 support") diff --git a/tox.ini b/tox.ini index 76c8700fef..331c73ce18 100644 --- a/tox.ini +++ b/tox.ini @@ -7,6 +7,10 @@ envlist = test, # Test using the run-tests Evergreen script. test-eg, + # Set up encryption files and services. + setup-encryption, + # Tear down encryption files and services. + teardown-encryption, # Run pre-commit on all files. lint, # Run pre-commit on all files, including stages that require manual fixes. @@ -21,16 +25,18 @@ envlist = typecheck, # Build sphinx docs doc, + # Serve live sphinx docs + doc-serve, # Test sphinx docs doc-test, # Linkcheck sphinx docs linkcheck - # Check the sdist integrity. - manifest labels = # Use labels and -m instead of -e so that tox -m