diff --git a/.flake8 b/.flake8
index ed9316381c..29227d4cf4 100644
--- a/.flake8
+++ b/.flake8
@@ -26,6 +26,7 @@ exclude =
*_pb2.py
# Standard linting exemptions.
+ **/.nox/**
__pycache__,
.git,
*.pyc,
diff --git a/.github/header-checker-lint.yml b/.github/header-checker-lint.yml
new file mode 100644
index 0000000000..fc281c05bd
--- /dev/null
+++ b/.github/header-checker-lint.yml
@@ -0,0 +1,15 @@
+{"allowedCopyrightHolders": ["Google LLC"],
+ "allowedLicenses": ["Apache-2.0", "MIT", "BSD-3"],
+ "ignoreFiles": ["**/requirements.txt", "**/requirements-test.txt"],
+ "sourceFileExtensions": [
+ "ts",
+ "js",
+ "java",
+ "sh",
+ "Dockerfile",
+ "yaml",
+ "py",
+ "html",
+ "txt"
+ ]
+}
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index ac787a3b95..708cdcc9eb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -49,8 +49,10 @@ docs.metadata
# Virtual environment
env/
+
+# Test logs
coverage.xml
-sponge_log.xml
+*sponge_log.xml
# System test environment variables.
system_tests/local_test_setup
diff --git a/.kokoro/build.sh b/.kokoro/build.sh
index a847a74a4f..2d206c3a1c 100755
--- a/.kokoro/build.sh
+++ b/.kokoro/build.sh
@@ -15,7 +15,11 @@
set -eo pipefail
-cd github/python-spanner
+if [[ -z "${PROJECT_ROOT:-}" ]]; then
+ PROJECT_ROOT="github/python-spanner"
+fi
+
+cd "${PROJECT_ROOT}"
# Disable buffering, so that the logs stream through.
export PYTHONUNBUFFERED=1
@@ -33,16 +37,26 @@ export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json")
export GOOGLE_CLOUD_TESTS_CREATE_SPANNER_INSTANCE=true
# Remove old nox
-python3.6 -m pip uninstall --yes --quiet nox-automation
+python3 -m pip uninstall --yes --quiet nox-automation
# Install nox
-python3.6 -m pip install --upgrade --quiet nox
-python3.6 -m nox --version
+python3 -m pip install --upgrade --quiet nox
+python3 -m nox --version
+
+# If this is a continuous build, send the test log to the FlakyBot.
+# See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot.
+if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]]; then
+ cleanup() {
+ chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot
+ $KOKORO_GFILE_DIR/linux_amd64/flakybot
+ }
+ trap cleanup EXIT HUP
+fi
# If NOX_SESSION is set, run only the specified session(s);
# otherwise run all the sessions.
if [[ -n "${NOX_SESSION:-}" ]]; then
- python3.6 -m nox -s "${NOX_SESSION:-}"
+ python3 -m nox -s ${NOX_SESSION:-}
else
- python3.6 -m nox
+ python3 -m nox
fi
diff --git a/.kokoro/docs/docs-presubmit.cfg b/.kokoro/docs/docs-presubmit.cfg
index 1118107829..505636c275 100644
--- a/.kokoro/docs/docs-presubmit.cfg
+++ b/.kokoro/docs/docs-presubmit.cfg
@@ -15,3 +15,14 @@ env_vars: {
key: "TRAMPOLINE_IMAGE_UPLOAD"
value: "false"
}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-spanner/.kokoro/build.sh"
+}
+
+# Only run this nox session.
+env_vars: {
+ key: "NOX_SESSION"
+ value: "docs docfx"
+}
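
Note: build.sh above deliberately expands `${NOX_SESSION:-}` unquoted, so the two space-separated names in `NOX_SESSION="docs docfx"` select two nox sessions. As a hedged illustration only (not this repo's actual noxfile), a `docs` session typically looks like:

```python
# Illustrative noxfile.py session; the real "docs" session in this repo
# may pin different dependencies and flags.
import nox


@nox.session(python="3.8")
def docs(session):
    """Build this package's HTML documentation with Sphinx."""
    session.install("-e", ".")
    session.install("sphinx", "alabaster", "recommonmark")
    session.run(
        "sphinx-build",
        "-W",  # treat warnings as errors
        "-b", "html",
        "docs/",
        "docs/_build/html/",
    )
```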
diff --git a/.kokoro/samples/python3.6/periodic-head.cfg b/.kokoro/samples/python3.6/periodic-head.cfg
new file mode 100644
index 0000000000..f9cfcd33e0
--- /dev/null
+++ b/.kokoro/samples/python3.6/periodic-head.cfg
@@ -0,0 +1,11 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-pubsub/.kokoro/test-samples-against-head.sh"
+}
diff --git a/.kokoro/samples/python3.7/periodic-head.cfg b/.kokoro/samples/python3.7/periodic-head.cfg
new file mode 100644
index 0000000000..f9cfcd33e0
--- /dev/null
+++ b/.kokoro/samples/python3.7/periodic-head.cfg
@@ -0,0 +1,11 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-pubsub/.kokoro/test-samples-against-head.sh"
+}
diff --git a/.kokoro/samples/python3.8/periodic-head.cfg b/.kokoro/samples/python3.8/periodic-head.cfg
new file mode 100644
index 0000000000..f9cfcd33e0
--- /dev/null
+++ b/.kokoro/samples/python3.8/periodic-head.cfg
@@ -0,0 +1,11 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-pubsub/.kokoro/test-samples-against-head.sh"
+}
diff --git a/.kokoro/test-samples-against-head.sh b/.kokoro/test-samples-against-head.sh
new file mode 100755
index 0000000000..4398b30ba4
--- /dev/null
+++ b/.kokoro/test-samples-against-head.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# A customized test runner for samples.
+#
+# For periodic builds, you can specify this file for testing against head.
+
+# `-e` enables the script to automatically fail when a command fails
+# `-o pipefail` sets the exit code to that of the rightmost command to exit with a non-zero status
+set -eo pipefail
+# Enables `**` to include files nested inside sub-folders
+shopt -s globstar
+
+cd github/python-spanner
+
+exec .kokoro/test-samples-impl.sh
diff --git a/.kokoro/test-samples-impl.sh b/.kokoro/test-samples-impl.sh
new file mode 100755
index 0000000000..cf5de74c17
--- /dev/null
+++ b/.kokoro/test-samples-impl.sh
@@ -0,0 +1,102 @@
+#!/bin/bash
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# `-e` enables the script to automatically fail when a command fails
+# `-o pipefail` sets the exit code to that of the rightmost command to exit with a non-zero status
+set -eo pipefail
+# Enables `**` to include files nested inside sub-folders
+shopt -s globstar
+
+# Exit early if samples directory doesn't exist
+if [ ! -d "./samples" ]; then
+ echo "No tests run. `./samples` not found"
+ exit 0
+fi
+
+# Disable buffering, so that the logs stream through.
+export PYTHONUNBUFFERED=1
+
+# Debug: show build environment
+env | grep KOKORO
+
+# Install nox
+python3.6 -m pip install --upgrade --quiet nox
+
+# Use secrets accessor service account to get secrets
+if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then
+ gcloud auth activate-service-account \
+ --key-file="${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" \
+ --project="cloud-devrel-kokoro-resources"
+fi
+
+# This script will create 3 files:
+# - testing/test-env.sh
+# - testing/service-account.json
+# - testing/client-secrets.json
+./scripts/decrypt-secrets.sh
+
+source ./testing/test-env.sh
+export GOOGLE_APPLICATION_CREDENTIALS=$(pwd)/testing/service-account.json
+
+# For cloud-run session, we activate the service account for gcloud sdk.
+gcloud auth activate-service-account \
+ --key-file "${GOOGLE_APPLICATION_CREDENTIALS}"
+
+export GOOGLE_CLIENT_SECRETS=$(pwd)/testing/client-secrets.json
+
+echo -e "\n******************** TESTING PROJECTS ********************"
+
+# Switch to 'fail at end' to allow all tests to complete before exiting.
+set +e
+# Use RTN to return a non-zero value if the test fails.
+RTN=0
+ROOT=$(pwd)
+# Find all requirements.txt in the samples directory (may break on whitespace).
+for file in samples/**/requirements.txt; do
+ cd "$ROOT"
+ # Navigate to the project folder.
+ file=$(dirname "$file")
+ cd "$file"
+
+ echo "------------------------------------------------------------"
+ echo "- testing $file"
+ echo "------------------------------------------------------------"
+
+ # Use nox to execute the tests for the project.
+ python3.6 -m nox -s "$RUN_TESTS_SESSION"
+ EXIT=$?
+
+ # If this is a periodic build, send the test log to the FlakyBot.
+ # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot.
+ if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then
+ chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot
+ $KOKORO_GFILE_DIR/linux_amd64/flakybot
+ fi
+
+ if [[ $EXIT -ne 0 ]]; then
+ RTN=1
+ echo -e "\n Testing failed: Nox returned a non-zero exit code. \n"
+ else
+ echo -e "\n Testing completed.\n"
+ fi
+
+done
+cd "$ROOT"
+
+# Workaround for Kokoro permissions issue: delete secrets
+rm testing/{test-env.sh,client-secrets.json,service-account.json}
+
+exit "$RTN"
diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh
index 86b7f9d906..19e3d5f529 100755
--- a/.kokoro/test-samples.sh
+++ b/.kokoro/test-samples.sh
@@ -13,6 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+# The default test runner for samples.
+#
+# For periodic builds, we rewind the repo to the latest release and
+# run test-samples-impl.sh.
# `-e` enables the script to automatically fail when a command fails
# `-o pipefail` sets the exit code to that of the rightmost command to exit with a non-zero status
@@ -24,87 +28,19 @@ cd github/python-spanner
# Run periodic samples tests at latest release
if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then
+    # Preserve the test runner implementation.
+ cp .kokoro/test-samples-impl.sh "${TMPDIR}/test-samples-impl.sh"
+ echo "--- IMPORTANT IMPORTANT IMPORTANT ---"
+ echo "Now we rewind the repo back to the latest release..."
LATEST_RELEASE=$(git describe --abbrev=0 --tags)
git checkout $LATEST_RELEASE
-fi
-
-# Exit early if samples directory doesn't exist
-if [ ! -d "./samples" ]; then
- echo "No tests run. `./samples` not found"
- exit 0
-fi
-
-# Disable buffering, so that the logs stream through.
-export PYTHONUNBUFFERED=1
-
-# Debug: show build environment
-env | grep KOKORO
-
-# Install nox
-python3.6 -m pip install --upgrade --quiet nox
-
-# Use secrets acessor service account to get secrets
-if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then
- gcloud auth activate-service-account \
- --key-file="${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" \
- --project="cloud-devrel-kokoro-resources"
-fi
-
-# This script will create 3 files:
-# - testing/test-env.sh
-# - testing/service-account.json
-# - testing/client-secrets.json
-./scripts/decrypt-secrets.sh
-
-source ./testing/test-env.sh
-export GOOGLE_APPLICATION_CREDENTIALS=$(pwd)/testing/service-account.json
-
-# For cloud-run session, we activate the service account for gcloud sdk.
-gcloud auth activate-service-account \
- --key-file "${GOOGLE_APPLICATION_CREDENTIALS}"
-
-export GOOGLE_CLIENT_SECRETS=$(pwd)/testing/client-secrets.json
-
-echo -e "\n******************** TESTING PROJECTS ********************"
-
-# Switch to 'fail at end' to allow all tests to complete before exiting.
-set +e
-# Use RTN to return a non-zero value if the test fails.
-RTN=0
-ROOT=$(pwd)
-# Find all requirements.txt in the samples directory (may break on whitespace).
-for file in samples/**/requirements.txt; do
- cd "$ROOT"
- # Navigate to the project folder.
- file=$(dirname "$file")
- cd "$file"
-
- echo "------------------------------------------------------------"
- echo "- testing $file"
- echo "------------------------------------------------------------"
-
- # Use nox to execute the tests for the project.
- python3.6 -m nox -s "$RUN_TESTS_SESSION"
- EXIT=$?
-
- # If this is a periodic build, send the test log to the FlakyBot.
- # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot.
- if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then
- chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot
- $KOKORO_GFILE_DIR/linux_amd64/flakybot
+ echo "The current head is: "
+ echo $(git rev-parse --verify HEAD)
+ echo "--- IMPORTANT IMPORTANT IMPORTANT ---"
+    # Restore the test runner implementation if the checkout removed it.
+ if [ ! -f .kokoro/test-samples-impl.sh ]; then
+ cp "${TMPDIR}/test-samples-impl.sh" .kokoro/test-samples-impl.sh
fi
+fi
- if [[ $EXIT -ne 0 ]]; then
- RTN=1
- echo -e "\n Testing failed: Nox returned a non-zero exit code. \n"
- else
- echo -e "\n Testing completed.\n"
- fi
-
-done
-cd "$ROOT"
-
-# Workaround for Kokoro permissions issue: delete secrets
-rm testing/{test-env.sh,client-secrets.json,service-account.json}
-
-exit "$RTN"
+exec .kokoro/test-samples-impl.sh
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000000..32302e4883
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,17 @@
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v3.4.0
+ hooks:
+ - id: trailing-whitespace
+ - id: end-of-file-fixer
+ - id: check-yaml
+- repo: https://github.com/psf/black
+ rev: 19.10b0
+ hooks:
+ - id: black
+- repo: https://gitlab.com/pycqa/flake8
+ rev: 3.9.0
+ hooks:
+ - id: flake8
diff --git a/.trampolinerc b/.trampolinerc
index 995ee29111..383b6ec89f 100644
--- a/.trampolinerc
+++ b/.trampolinerc
@@ -24,6 +24,7 @@ required_envvars+=(
pass_down_envvars+=(
"STAGING_BUCKET"
"V2_STAGING_BUCKET"
+ "NOX_SESSION"
)
# Prevent unintentional override on the default image.
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bc4401829b..8714b709df 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,31 @@
[1]: https://pypi.org/project/google-cloud-spanner/#history
+## [3.3.0](https://www.github.com/googleapis/python-spanner/compare/v3.2.0...v3.3.0) (2021-03-25)
+
+
+### Features
+
+* add encryption_info to Database ([#284](https://www.github.com/googleapis/python-spanner/issues/284)) ([2fd0352](https://www.github.com/googleapis/python-spanner/commit/2fd0352f695d7ab85e57d8c4388f42f91cf39435))
+* add support for CMEK ([#105](https://www.github.com/googleapis/python-spanner/issues/105)) ([e990ff7](https://www.github.com/googleapis/python-spanner/commit/e990ff70342e7c2e27059e82c8d74cce39eb85d0))
+* add support for custom timeout and retry parameters in execute_update method in transactions ([#251](https://www.github.com/googleapis/python-spanner/issues/251)) ([8abaebd](https://www.github.com/googleapis/python-spanner/commit/8abaebd9edac198596e7bd51d068d50147d0391d))
+* added retry and timeout params to partition read in database and snapshot class ([#278](https://www.github.com/googleapis/python-spanner/issues/278)) ([1a7c9d2](https://www.github.com/googleapis/python-spanner/commit/1a7c9d296c23dfa7be7b07ea511a4a8fc2c0693f))
+* **db_api:** support executing several DDLs separated by semicolon ([#277](https://www.github.com/googleapis/python-spanner/issues/277)) ([801ddc8](https://www.github.com/googleapis/python-spanner/commit/801ddc87434ff9e3c86b1281ebfeac26195c06e8))
+
+
+### Bug Fixes
+
+* avoid consuming pending null values when merging ([#286](https://www.github.com/googleapis/python-spanner/issues/286)) ([c6cba9f](https://www.github.com/googleapis/python-spanner/commit/c6cba9fbe4c717f1f8e2a97e3f76bfe6b956e55b))
+* **db_api:** allow file path for credentials ([#221](https://www.github.com/googleapis/python-spanner/issues/221)) ([1de0284](https://www.github.com/googleapis/python-spanner/commit/1de028430b779a50d38242fe70567e92b560df5a))
+* **db_api:** ensure DDL statements are being executed ([#290](https://www.github.com/googleapis/python-spanner/issues/290)) ([baa02ee](https://www.github.com/googleapis/python-spanner/commit/baa02ee1a352f7c509a3e169927cf220913e521f))
+* **db_api:** revert Mutations API usage ([#285](https://www.github.com/googleapis/python-spanner/issues/285)) ([e5d4901](https://www.github.com/googleapis/python-spanner/commit/e5d4901e9b7111b39dfec4c56032875dc7c6e74c))
+
+
+### Documentation
+
+* fix docstring types and typos ([#259](https://www.github.com/googleapis/python-spanner/issues/259)) ([1b0ce1d](https://www.github.com/googleapis/python-spanner/commit/1b0ce1d2986085ce4033cf773eb6c5d3b904473c))
+* fix snapshot usage ([#291](https://www.github.com/googleapis/python-spanner/issues/291)) ([eee2181](https://www.github.com/googleapis/python-spanner/commit/eee218164c3177586b73278aa21495280984af89))
+
## [3.2.0](https://www.github.com/googleapis/python-spanner/compare/v3.1.0...v3.2.0) (2021-03-02)
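
Of the 3.3.0 features above, #277 lets the DB-API run several DDL statements supplied as one semicolon-separated string. A minimal sketch of that usage (the instance and database IDs are placeholders for resources that already exist, and autocommit mode is an assumption here):

```python
# Hedged sketch of the db_api multi-DDL feature (#277); the IDs below
# are placeholders, not values from this repository.
from google.cloud.spanner_dbapi import connect

conn = connect("my-instance", "my-database")
conn.autocommit = True  # assumed: execute DDL outside an explicit transaction

cur = conn.cursor()
# Two DDL statements in a single call, separated by a semicolon.
cur.execute(
    """
    CREATE TABLE Singers (SingerId INT64, Name STRING(1024)) PRIMARY KEY (SingerId);
    CREATE INDEX SingersByName ON Singers (Name)
    """
)
conn.close()
```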
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 11e26783be..176f8e514e 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -21,8 +21,8 @@ In order to add a feature:
- The feature must be documented in both the API and narrative
documentation.
-- The feature must work fully on the following CPython versions: 2.7,
- 3.5, 3.6, 3.7 and 3.8 on both UNIX and Windows.
+- The feature must work fully on the following CPython versions:
+ 3.6, 3.7, 3.8 and 3.9 on both UNIX and Windows.
- The feature must not add unnecessary dependencies (where
"unnecessary" is of course subjective, but new dependencies should
@@ -70,9 +70,14 @@ We use `nox <https://nox.readthedocs.io/en/latest/>`__ to instrument our tests.
- To test your changes, run unit tests with ``nox``::
$ nox -s unit-2.7
- $ nox -s unit-3.7
+ $ nox -s unit-3.8
$ ...
+- Args to pytest can be passed through the nox command separated by a `--`. For
+ example, to run a single test::
+
+   $ nox -s unit-3.8 -- -k <name of test>
+
.. note::
The unit tests and system tests are described in the
@@ -93,8 +98,12 @@ On Debian/Ubuntu::
************
Coding Style
************
+- We use the automatic code formatter ``black``. You can run it using
+ the nox session ``blacken``. This will eliminate many lint errors. Run via::
-- PEP8 compliance, with exceptions defined in the linter configuration.
+ $ nox -s blacken
+
+- PEP8 compliance is required, with exceptions defined in the linter configuration.
If you have ``nox`` installed, you can test that you have not introduced
any non-compliant code via::
@@ -111,6 +120,16 @@ Coding Style
should point to the official ``googleapis`` checkout and the
branch should be the main branch on that remote (``master``).
+- This repository contains configuration for the
+  `pre-commit <https://pre-commit.com/>`__ tool, which automates checking
+ our linters during a commit. If you have it installed on your ``$PATH``,
+ you can enable enforcing those checks via:
+
+.. code-block:: bash
+
+ $ pre-commit install
+ pre-commit installed at .git/hooks/pre-commit
+
Exceptions to PEP8:
- Many unit tests use a helper method, ``_call_fut`` ("FUT" is short for
@@ -123,13 +142,18 @@ Running System Tests
- To run system tests, you can execute::
- $ nox -s system-3.7
+ # Run all system tests
+ $ nox -s system-3.8
$ nox -s system-2.7
+ # Run a single system test
+   $ nox -s system-3.8 -- -k <name of test>
+
+
.. note::
System tests are only configured to run under Python 2.7 and
- Python 3.7. For expediency, we do not run them in older versions
+ Python 3.8. For expediency, we do not run them in older versions
of Python 3.
This alone will not run the tests. You'll need to change some local
@@ -192,25 +216,24 @@ Supported Python Versions
We support:
-- `Python 3.5`_
- `Python 3.6`_
- `Python 3.7`_
- `Python 3.8`_
+- `Python 3.9`_
-.. _Python 3.5: https://docs.python.org/3.5/
.. _Python 3.6: https://docs.python.org/3.6/
.. _Python 3.7: https://docs.python.org/3.7/
.. _Python 3.8: https://docs.python.org/3.8/
+.. _Python 3.9: https://docs.python.org/3.9/
Supported versions can be found in our ``noxfile.py`` `config`_.
.. _config: https://github.com/googleapis/python-spanner/blob/master/noxfile.py
-Python 2.7 support is deprecated. All code changes should maintain Python 2.7 compatibility until January 1, 2020.
We also explicitly decided to support Python 3 beginning with version
-3.5. Reasons for this include:
+3.6. Reasons for this include:
- Encouraging use of newest versions of Python 3
- Taking the lead of `prominent`_ open-source `projects`_
diff --git a/LICENSE b/LICENSE
index a8ee855de2..d645695673 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,7 @@
- Apache License
+
+ Apache License
Version 2.0, January 2004
- https://www.apache.org/licenses/
+ http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
@@ -192,7 +193,7 @@
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- https://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/MANIFEST.in b/MANIFEST.in
index e9e29d1203..e783f4c620 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -16,10 +16,10 @@
# Generated by synthtool. DO NOT EDIT!
include README.rst LICENSE
-recursive-include google *.json *.proto
+recursive-include google *.json *.proto py.typed
recursive-include tests *
global-exclude *.py[co]
global-exclude __pycache__
# Exclude scripts for samples readmegen
-prune scripts/readme-gen
\ No newline at end of file
+prune scripts/readme-gen
diff --git a/docs/_static/custom.css b/docs/_static/custom.css
index 0abaf229fc..bcd37bbd3c 100644
--- a/docs/_static/custom.css
+++ b/docs/_static/custom.css
@@ -1,4 +1,9 @@
div#python2-eol {
border-color: red;
border-width: medium;
-}
\ No newline at end of file
+}
+
+/* Ensure minimum width for 'Parameters' / 'Returns' column */
+dl.field-list > dt {
+ min-width: 100px
+}
diff --git a/docs/conf.py b/docs/conf.py
index 7d53976561..ee774dd1c7 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -345,10 +345,11 @@
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
- "python": ("http://python.readthedocs.org/en/latest/", None),
- "google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
+ "python": ("https://python.readthedocs.org/en/latest/", None),
+ "google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
- "grpc": ("https://grpc.io/grpc/python/", None),
+ "grpc": ("https://grpc.github.io/grpc/python/", None),
+ "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
}
diff --git a/docs/snapshot-usage.rst b/docs/snapshot-usage.rst
index e088cd0ceb..311ea8f3ca 100644
--- a/docs/snapshot-usage.rst
+++ b/docs/snapshot-usage.rst
@@ -65,16 +65,16 @@ Read Table Data
To read data for selected rows from a table in the database, call
:meth:`~google.cloud.spanner_v1.snapshot.Snapshot.read` which will return
-all rows specified in ``key_set``, or fail if the result set is too large,
+all rows specified in ``keyset``, or fail if the result set is too large,
.. code:: python
with database.snapshot() as snapshot:
result = snapshot.read(
table='table-name', columns=['first_name', 'last_name', 'age'],
- key_set=['phred@example.com', 'bharney@example.com'])
+ keyset=spanner.KeySet([['phred@example.com'], ['bharney@example.com']]))
- for row in result.rows:
+ for row in result:
print(row)
.. note::
@@ -100,7 +100,7 @@ result set is too large,
'WHERE p.employee_id == e.employee_id')
result = snapshot.execute_sql(QUERY)
- for row in list(result):
+ for row in result:
print(row)
.. note::
diff --git a/docs/spanner_admin_database_v1/database_admin.rst b/docs/spanner_admin_database_v1/database_admin.rst
new file mode 100644
index 0000000000..5618b72cd6
--- /dev/null
+++ b/docs/spanner_admin_database_v1/database_admin.rst
@@ -0,0 +1,11 @@
+DatabaseAdmin
+-------------------------------
+
+.. automodule:: google.cloud.spanner_admin_database_v1.services.database_admin
+ :members:
+ :inherited-members:
+
+
+.. automodule:: google.cloud.spanner_admin_database_v1.services.database_admin.pagers
+ :members:
+ :inherited-members:
diff --git a/docs/spanner_admin_database_v1/services.rst b/docs/spanner_admin_database_v1/services.rst
index 770ff1a8c2..55e57d8dc0 100644
--- a/docs/spanner_admin_database_v1/services.rst
+++ b/docs/spanner_admin_database_v1/services.rst
@@ -1,6 +1,6 @@
Services for Google Cloud Spanner Admin Database v1 API
=======================================================
+.. toctree::
+ :maxdepth: 2
-.. automodule:: google.cloud.spanner_admin_database_v1.services.database_admin
- :members:
- :inherited-members:
+ database_admin
diff --git a/docs/spanner_admin_instance_v1/instance_admin.rst b/docs/spanner_admin_instance_v1/instance_admin.rst
new file mode 100644
index 0000000000..f18b5ca893
--- /dev/null
+++ b/docs/spanner_admin_instance_v1/instance_admin.rst
@@ -0,0 +1,11 @@
+InstanceAdmin
+-------------------------------
+
+.. automodule:: google.cloud.spanner_admin_instance_v1.services.instance_admin
+ :members:
+ :inherited-members:
+
+
+.. automodule:: google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers
+ :members:
+ :inherited-members:
diff --git a/docs/spanner_admin_instance_v1/services.rst b/docs/spanner_admin_instance_v1/services.rst
index 44b02ecebb..407d44cc34 100644
--- a/docs/spanner_admin_instance_v1/services.rst
+++ b/docs/spanner_admin_instance_v1/services.rst
@@ -1,6 +1,6 @@
Services for Google Cloud Spanner Admin Instance v1 API
=======================================================
+.. toctree::
+ :maxdepth: 2
-.. automodule:: google.cloud.spanner_admin_instance_v1.services.instance_admin
- :members:
- :inherited-members:
+ instance_admin
diff --git a/docs/spanner_v1/services.rst b/docs/spanner_v1/services.rst
index 9dbd2fe03e..3bbbb55f79 100644
--- a/docs/spanner_v1/services.rst
+++ b/docs/spanner_v1/services.rst
@@ -1,6 +1,6 @@
Services for Google Cloud Spanner v1 API
========================================
+.. toctree::
+ :maxdepth: 2
-.. automodule:: google.cloud.spanner_v1.services.spanner
- :members:
- :inherited-members:
+ spanner
diff --git a/docs/spanner_v1/spanner.rst b/docs/spanner_v1/spanner.rst
new file mode 100644
index 0000000000..f7803df4ae
--- /dev/null
+++ b/docs/spanner_v1/spanner.rst
@@ -0,0 +1,11 @@
+Spanner
+-------------------------
+
+.. automodule:: google.cloud.spanner_v1.services.spanner
+ :members:
+ :inherited-members:
+
+
+.. automodule:: google.cloud.spanner_v1.services.spanner.pagers
+ :members:
+ :inherited-members:
diff --git a/google/cloud/spanner_admin_database_v1/__init__.py b/google/cloud/spanner_admin_database_v1/__init__.py
index 0f5bcd49b1..dded570012 100644
--- a/google/cloud/spanner_admin_database_v1/__init__.py
+++ b/google/cloud/spanner_admin_database_v1/__init__.py
@@ -18,6 +18,7 @@
from .services.database_admin import DatabaseAdminClient
from .types.backup import Backup
from .types.backup import BackupInfo
+from .types.backup import CreateBackupEncryptionConfig
from .types.backup import CreateBackupMetadata
from .types.backup import CreateBackupRequest
from .types.backup import DeleteBackupRequest
@@ -27,6 +28,8 @@
from .types.backup import ListBackupsRequest
from .types.backup import ListBackupsResponse
from .types.backup import UpdateBackupRequest
+from .types.common import EncryptionConfig
+from .types.common import EncryptionInfo
from .types.common import OperationProgress
from .types.spanner_database_admin import CreateDatabaseMetadata
from .types.spanner_database_admin import CreateDatabaseRequest
@@ -40,6 +43,7 @@
from .types.spanner_database_admin import ListDatabasesRequest
from .types.spanner_database_admin import ListDatabasesResponse
from .types.spanner_database_admin import OptimizeRestoredDatabaseMetadata
+from .types.spanner_database_admin import RestoreDatabaseEncryptionConfig
from .types.spanner_database_admin import RestoreDatabaseMetadata
from .types.spanner_database_admin import RestoreDatabaseRequest
from .types.spanner_database_admin import RestoreInfo
@@ -51,6 +55,7 @@
__all__ = (
"Backup",
"BackupInfo",
+ "CreateBackupEncryptionConfig",
"CreateBackupMetadata",
"CreateBackupRequest",
"CreateDatabaseMetadata",
@@ -58,6 +63,8 @@
"Database",
"DeleteBackupRequest",
"DropDatabaseRequest",
+ "EncryptionConfig",
+ "EncryptionInfo",
"GetBackupRequest",
"GetDatabaseDdlRequest",
"GetDatabaseDdlResponse",
@@ -72,6 +79,7 @@
"ListDatabasesResponse",
"OperationProgress",
"OptimizeRestoredDatabaseMetadata",
+ "RestoreDatabaseEncryptionConfig",
"RestoreDatabaseMetadata",
"RestoreDatabaseRequest",
"RestoreInfo",
diff --git a/google/cloud/spanner_admin_database_v1/proto/backup.proto b/google/cloud/spanner_admin_database_v1/proto/backup.proto
index a677207f72..31fdb5326c 100644
--- a/google/cloud/spanner_admin_database_v1/proto/backup.proto
+++ b/google/cloud/spanner_admin_database_v1/proto/backup.proto
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -22,7 +22,6 @@ import "google/longrunning/operations.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/timestamp.proto";
import "google/spanner/admin/database/v1/common.proto";
-import "google/api/annotations.proto";
option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1";
option go_package = "google.golang.org/genproto/googleapis/spanner/admin/database/v1;database";
@@ -52,14 +51,14 @@ message Backup {
READY = 2;
}
- // Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
- // Name of the database from which this backup was
- // created. This needs to be in the same instance as the backup.
- // Values are of the form
+ // Required for the
+ // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+ // operation. Name of the database from which this backup was created. This
+ // needs to be in the same instance as the backup. Values are of the form
// `projects/<project>/instances/<instance>/databases/<database>`.
string database = 2 [(google.api.resource_reference) = {
- type: "spanner.googleapis.com/Database"
- }];
+ type: "spanner.googleapis.com/Database"
+ }];
// The backup will contain an externally consistent copy of the database at
// the timestamp specified by `version_time`. If `version_time` is not
@@ -67,7 +66,8 @@ message Backup {
// backup.
google.protobuf.Timestamp version_time = 9;
- // Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+ // Required for the
+ // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
// operation. The expiration time of the backup, with microseconds
// granularity that must be at least 6 hours and at most 366 days
// from the time the CreateBackup request is processed. Once the `expire_time`
@@ -75,8 +75,11 @@ message Backup {
// Spanner to free the resources used by the backup.
google.protobuf.Timestamp expire_time = 3;
- // Output only for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
- // Required for the [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup] operation.
+ // Output only for the
+ // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+ // operation. Required for the
+ // [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
+ // operation.
//
// A globally unique identifier for the backup which cannot be
// changed. Values are of the form
@@ -90,10 +93,12 @@ message Backup {
// `projects/<project>/instances/<instance>`.
string name = 1;
- // Output only. The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+ // Output only. The time the
+ // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
// request is received. If the request does not specify `version_time`, the
// `version_time` of the backup will be equivalent to the `create_time`.
- google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+ google.protobuf.Timestamp create_time = 4
+ [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Size of the backup in bytes.
int64 size_bytes = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
@@ -108,10 +113,20 @@ message Backup {
// any referencing database prevents the backup from being deleted. When a
// restored database from the backup enters the `READY` state, the reference
// to the backup is removed.
- repeated string referencing_databases = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
+ repeated string referencing_databases = 7 [
+ (google.api.field_behavior) = OUTPUT_ONLY,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Database"
+ }
+ ];
+
+ // Output only. The encryption information for the backup.
+ EncryptionInfo encryption_info = 8
+ [(google.api.field_behavior) = OUTPUT_ONLY];
}
-// The request for [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
+// The request for
+// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
message CreateBackupRequest {
// Required. The name of the instance in which the backup will be
// created. This must be the same instance that contains the database the
@@ -133,23 +148,32 @@ message CreateBackupRequest {
// Required. The backup to create.
Backup backup = 3 [(google.api.field_behavior) = REQUIRED];
+
+ // Optional. The encryption configuration used to encrypt the backup. If this
+ // field is not specified, the backup will use the same encryption
+ // configuration as the database by default, namely
+ // [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
+ // = `USE_DATABASE_ENCRYPTION`.
+ CreateBackupEncryptionConfig encryption_config = 4
+ [(google.api.field_behavior) = OPTIONAL];
}
// Metadata type for the operation returned by
// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
message CreateBackupMetadata {
// The name of the backup being created.
- string name = 1 [(google.api.resource_reference) = {
- type: "spanner.googleapis.com/Backup"
- }];
+ string name = 1 [
+ (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" }
+ ];
// The name of the database the backup is created from.
string database = 2 [(google.api.resource_reference) = {
- type: "spanner.googleapis.com/Database"
- }];
+ type: "spanner.googleapis.com/Database"
+ }];
// The progress of the
- // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
+ // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+ // operation.
OperationProgress progress = 3;
// The time at which cancellation of this operation was received.
@@ -161,12 +185,14 @@ message CreateBackupMetadata {
// other methods to check whether the cancellation succeeded or whether the
// operation completed despite cancellation. On successful cancellation,
// the operation is not deleted; instead, it becomes an operation with
- // an [Operation.error][] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
+ // an [Operation.error][google.longrunning.Operation.error] value with a
+ // [google.rpc.Status.code][google.rpc.Status.code] of 1,
// corresponding to `Code.CANCELLED`.
google.protobuf.Timestamp cancel_time = 4;
}
-// The request for [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
+// The request for
+// [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
message UpdateBackupRequest {
// Required. The backup to update. `backup.name`, and the fields to be updated
// as specified by `update_mask` are required. Other fields are ignored.
@@ -179,36 +205,36 @@ message UpdateBackupRequest {
// resource, not to the request message. The field mask must always be
// specified; this prevents any future fields from being erased accidentally
// by clients that do not know about them.
- google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED];
+ google.protobuf.FieldMask update_mask = 2
+ [(google.api.field_behavior) = REQUIRED];
}
-// The request for [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
+// The request for
+// [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
message GetBackupRequest {
// Required. Name of the backup.
// Values are of the form
// `projects/<project>/instances/<instance>/backups/<backup>`.
string name = 1 [
(google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "spanner.googleapis.com/Backup"
- }
+ (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" }
];
}
-// The request for [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup].
+// The request for
+// [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup].
message DeleteBackupRequest {
// Required. Name of the backup to delete.
// Values are of the form
// `projects/<project>/instances/<instance>/backups/<backup>`.
string name = 1 [
(google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "spanner.googleapis.com/Backup"
- }
+ (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" }
];
}
-// The request for [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
+// The request for
+// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
message ListBackupsRequest {
// Required. The instance to list backups from. Values are of the
// form `projects/<project>/instances/<instance>`.
@@ -227,13 +253,16 @@ message ListBackupsRequest {
// must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
// Colon `:` is the contains operator. Filter rules are not case sensitive.
//
- // The following fields in the [Backup][google.spanner.admin.database.v1.Backup] are eligible for filtering:
+ // The following fields in the
+ // [Backup][google.spanner.admin.database.v1.Backup] are eligible for
+ // filtering:
//
// * `name`
// * `database`
// * `state`
- // * `create_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
- // * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
+ // * `create_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
+ // * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
+ // * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
// * `size_bytes`
//
// You can combine multiple expressions by enclosing each expression in
@@ -260,21 +289,23 @@ message ListBackupsRequest {
int32 page_size = 3;
// If non-empty, `page_token` should contain a
- // [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token] from a
- // previous [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse] to the same `parent` and with the same
- // `filter`.
+ // [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
+ // from a previous
+ // [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
+ // to the same `parent` and with the same `filter`.
string page_token = 4;
}
-// The response for [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
+// The response for
+// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
message ListBackupsResponse {
// The list of matching backups. Backups returned are ordered by `create_time`
// in descending order, starting from the most recent `create_time`.
repeated Backup backups = 1;
// `next_page_token` can be sent in a subsequent
- // [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups] call to fetch more
- // of the matching backups.
+ // [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
+ // call to fetch more of the matching backups.
string next_page_token = 2;
}
@@ -304,7 +335,9 @@ message ListBackupOperationsRequest {
// * `name` - The name of the long-running operation
// * `done` - False if the operation is in progress, else true.
// * `metadata.@type` - the type of metadata. For example, the type string
- // for [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] is
+ // for
+ // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
+ // is
// `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`.
// * `metadata.<field_name>` - any field in metadata.value.
// * `error` - Error associated with the long-running operation.
@@ -324,7 +357,8 @@ message ListBackupOperationsRequest {
// `(metadata.name:howl) AND` \
// `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
// `(error:*)` - Returns operations where:
- // * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
+ // * The operation's metadata type is
+ // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
// * The backup name contains the string "howl".
// * The operation started before 2018-03-28T14:50:00Z.
// * The operation resulted in an error.
@@ -336,8 +370,9 @@ message ListBackupOperationsRequest {
// If non-empty, `page_token` should contain a
// [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
- // from a previous [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse] to the
- // same `parent` and with the same `filter`.
+ // from a previous
+ // [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
+ // to the same `parent` and with the same `filter`.
string page_token = 4;
}
@@ -348,11 +383,11 @@ message ListBackupOperationsResponse {
// operations][google.longrunning.Operation]. Each operation's name will be
// prefixed by the backup's name and the operation's
// [metadata][google.longrunning.Operation.metadata] will be of type
- // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. Operations returned include those that are
- // pending or have completed/failed/canceled within the last 7 days.
- // Operations returned are ordered by
- // `operation.metadata.value.progress.start_time` in descending order starting
- // from the most recently started operation.
+ // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
+ // Operations returned include those that are pending or have
+ // completed/failed/canceled within the last 7 days. Operations returned are
+ // ordered by `operation.metadata.value.progress.start_time` in descending
+ // order starting from the most recently started operation.
repeated google.longrunning.Operation operations = 1;
// `next_page_token` can be sent in a subsequent
@@ -364,23 +399,63 @@ message ListBackupOperationsResponse {
// Information about a backup.
message BackupInfo {
// Name of the backup.
- string backup = 1 [(google.api.resource_reference) = {
- type: "spanner.googleapis.com/Backup"
- }];
+ string backup = 1 [
+ (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" }
+ ];
// The backup contains an externally consistent copy of `source_database` at
// the timestamp specified by `version_time`. If the
- // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request did not specify
- // `version_time`, the `version_time` of the backup is equivalent to the
- // `create_time`.
+ // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+ // request did not specify `version_time`, the `version_time` of the backup is
+ // equivalent to the `create_time`.
google.protobuf.Timestamp version_time = 4;
- // The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request was
- // received.
+ // The time the
+ // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+ // request was received.
google.protobuf.Timestamp create_time = 2;
// Name of the database the backup was created from.
string source_database = 3 [(google.api.resource_reference) = {
- type: "spanner.googleapis.com/Database"
- }];
+ type: "spanner.googleapis.com/Database"
+ }];
+}
+
+// Encryption configuration for the backup to create.
+message CreateBackupEncryptionConfig {
+ // Encryption types for the backup.
+ enum EncryptionType {
+ // Unspecified. Do not use.
+ ENCRYPTION_TYPE_UNSPECIFIED = 0;
+
+ // Use the same encryption configuration as the database. This is the
+ // default option when
+ // [encryption_config][google.spanner.admin.database.v1.CreateBackupEncryptionConfig]
+ // is empty. For example, if the database is using
+ // `Customer_Managed_Encryption`, the backup will be using the same Cloud
+ // KMS key as the database.
+ USE_DATABASE_ENCRYPTION = 1;
+
+ // Use Google default encryption.
+ GOOGLE_DEFAULT_ENCRYPTION = 2;
+
+ // Use customer managed encryption. If specified, `kms_key_name`
+ // must contain a valid Cloud KMS key.
+ CUSTOMER_MANAGED_ENCRYPTION = 3;
+ }
+
+ // Required. The encryption type of the backup.
+ EncryptionType encryption_type = 1 [(google.api.field_behavior) = REQUIRED];
+
+ // Optional. The Cloud KMS key that will be used to protect the backup.
+ // This field should be set only when
+ // [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
+ // is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
+  // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
+ string kms_key_name = 2 [
+ (google.api.field_behavior) = OPTIONAL,
+ (google.api.resource_reference) = {
+ type: "cloudkms.googleapis.com/CryptoKey"
+ }
+ ];
}
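
A hedged sketch of how the new `encryption_config` request field might be used from the generated Python client; every resource name below, including the KMS key path, is a placeholder:

```python
# Sketch: create a CMEK-protected backup via CreateBackupRequest's new
# encryption_config field (field 4 above). All names are placeholders.
import datetime

from google.cloud.spanner_admin_database_v1 import (
    Backup,
    CreateBackupEncryptionConfig,
    CreateBackupRequest,
    DatabaseAdminClient,
)

client = DatabaseAdminClient()
request = CreateBackupRequest(
    parent="projects/my-project/instances/my-instance",
    backup_id="my-backup",
    backup=Backup(
        database="projects/my-project/instances/my-instance/databases/my-db",
        # proto-plus converts timezone-aware datetimes to protobuf Timestamps.
        expire_time=datetime.datetime.now(datetime.timezone.utc)
        + datetime.timedelta(days=7),
    ),
    encryption_config=CreateBackupEncryptionConfig(
        encryption_type=CreateBackupEncryptionConfig.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION,
        kms_key_name=(
            "projects/my-project/locations/us-central1/"
            "keyRings/my-ring/cryptoKeys/my-key"
        ),
    ),
)
operation = client.create_backup(request=request)
backup = operation.result()  # blocks until the long-running operation completes
```

Backups created this way can later be narrowed with the `ListBackupsRequest` filter grammar documented above, e.g. `filter='expire_time < "2022-01-01T00:00:00Z"'`.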
diff --git a/google/cloud/spanner_admin_database_v1/proto/common.proto b/google/cloud/spanner_admin_database_v1/proto/common.proto
index 27ecb0a98b..24d7c2d080 100644
--- a/google/cloud/spanner_admin_database_v1/proto/common.proto
+++ b/google/cloud/spanner_admin_database_v1/proto/common.proto
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -17,8 +17,9 @@ syntax = "proto3";
package google.spanner.admin.database.v1;
import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
import "google/protobuf/timestamp.proto";
-import "google/api/annotations.proto";
+import "google/rpc/status.proto";
option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1";
option go_package = "google.golang.org/genproto/googleapis/spanner/admin/database/v1;database";
@@ -27,6 +28,14 @@ option java_outer_classname = "CommonProto";
option java_package = "com.google.spanner.admin.database.v1";
option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Database\\V1";
option ruby_package = "Google::Cloud::Spanner::Admin::Database::V1";
+option (google.api.resource_definition) = {
+ type: "cloudkms.googleapis.com/CryptoKey"
+ pattern: "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}"
+};
+option (google.api.resource_definition) = {
+ type: "cloudkms.googleapis.com/CryptoKeyVersion"
+ pattern: "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}"
+};
// Encapsulates progress related information for a Cloud Spanner long
// running operation.
@@ -42,3 +51,50 @@ message OperationProgress {
// successfully.
google.protobuf.Timestamp end_time = 3;
}
+
+// Encryption configuration for a Cloud Spanner database.
+message EncryptionConfig {
+ // The Cloud KMS key to be used for encrypting and decrypting
+ // the database. Values are of the form
+  // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
+ string kms_key_name = 2 [(google.api.resource_reference) = {
+ type: "cloudkms.googleapis.com/CryptoKey"
+ }];
+}
+
+// Encryption information for a Cloud Spanner database or backup.
+message EncryptionInfo {
+ // Possible encryption types.
+ enum Type {
+ // Encryption type was not specified, though data at rest remains encrypted.
+ TYPE_UNSPECIFIED = 0;
+
+ // The data is encrypted at rest with a key that is
+ // fully managed by Google. No key version or status will be populated.
+ // This is the default state.
+ GOOGLE_DEFAULT_ENCRYPTION = 1;
+
+ // The data is encrypted at rest with a key that is
+    // managed by the customer. The active version of the key, `kms_key_version`,
+    // will be populated, and `encryption_status` may be populated.
+ CUSTOMER_MANAGED_ENCRYPTION = 2;
+ }
+
+ // Output only. The type of encryption.
+ Type encryption_type = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. If present, the status of a recent encrypt/decrypt call on
+ // underlying data for this database or backup. Regardless of status, data is
+ // always encrypted at rest.
+ google.rpc.Status encryption_status = 4
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. A Cloud KMS key version that is being used to protect the
+ // database or backup.
+ string kms_key_version = 2 [
+ (google.api.field_behavior) = OUTPUT_ONLY,
+ (google.api.resource_reference) = {
+ type: "cloudkms.googleapis.com/CryptoKeyVersion"
+ }
+ ];
+}
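
`EncryptionConfig` and `EncryptionInfo` surface as the output-only `encryption_config` and `encryption_info` fields on `Database` (added in the next file). A small sketch of reading them back; the database name is a placeholder:

```python
# Sketch: inspect the output-only encryption fields on a Database.
from google.cloud.spanner_admin_database_v1 import DatabaseAdminClient

client = DatabaseAdminClient()
db = client.get_database(
    name="projects/my-project/instances/my-instance/databases/my-db"
)

# Both fields are empty unless the database uses customer-managed keys.
print(db.encryption_config.kms_key_name)
for info in db.encryption_info:  # repeated; propagated lazily from the backend
    print(info.encryption_type, info.kms_key_version)
```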
diff --git a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto b/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto
index 12e751bd67..ac771bc061 100644
--- a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto
+++ b/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -65,10 +65,11 @@ service DatabaseAdmin {
// have a name of the format `<database_name>/operations/<operation_id>` and
// can be used to track preparation of the database. The
// [metadata][google.longrunning.Operation.metadata] field type is
- // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The
- // [response][google.longrunning.Operation.response] field type is
+ // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
// [Database][google.spanner.admin.database.v1.Database], if successful.
- rpc CreateDatabase(CreateDatabaseRequest) returns (google.longrunning.Operation) {
+ rpc CreateDatabase(CreateDatabaseRequest)
+ returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1/{parent=projects/*/instances/*}/databases"
body: "*"
@@ -94,8 +95,10 @@ service DatabaseAdmin {
// the format `<database_name>/operations/<operation_id>` and can be used to
// track execution of the schema change(s). The
// [metadata][google.longrunning.Operation.metadata] field type is
- // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response.
- rpc UpdateDatabaseDdl(UpdateDatabaseDdlRequest) returns (google.longrunning.Operation) {
+ // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
+ // The operation has no response.
+ rpc UpdateDatabaseDdl(UpdateDatabaseDdlRequest)
+ returns (google.longrunning.Operation) {
option (google.api.http) = {
patch: "/v1/{database=projects/*/instances/*/databases/*}/ddl"
body: "*"
@@ -134,7 +137,8 @@ service DatabaseAdmin {
// permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
// For backups, authorization requires `spanner.backups.setIamPolicy`
// permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
- rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) {
+ rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest)
+ returns (google.iam.v1.Policy) {
option (google.api.http) = {
post: "/v1/{resource=projects/*/instances/*/databases/*}:setIamPolicy"
body: "*"
@@ -154,7 +158,8 @@ service DatabaseAdmin {
// [resource][google.iam.v1.GetIamPolicyRequest.resource].
// For backups, authorization requires `spanner.backups.getIamPolicy`
// permission on [resource][google.iam.v1.GetIamPolicyRequest.resource].
- rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) {
+ rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest)
+ returns (google.iam.v1.Policy) {
option (google.api.http) = {
post: "/v1/{resource=projects/*/instances/*/databases/*}:getIamPolicy"
body: "*"
@@ -176,7 +181,8 @@ service DatabaseAdmin {
// Calling this method on a backup that does not exist will
// result in a NOT_FOUND error if the user has
// `spanner.backups.list` permission on the containing instance.
- rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) {
+ rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest)
+ returns (google.iam.v1.TestIamPermissionsResponse) {
option (google.api.http) = {
post: "/v1/{resource=projects/*/instances/*/databases/*}:testIamPermissions"
body: "*"
@@ -194,12 +200,12 @@ service DatabaseAdmin {
// `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
// and can be used to track creation of the backup. The
// [metadata][google.longrunning.Operation.metadata] field type is
- // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. The
- // [response][google.longrunning.Operation.response] field type is
- // [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the
- // creation and delete the backup.
- // There can be only one pending backup creation per database. Backup creation
- // of different databases can run concurrently.
+ // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [Backup][google.spanner.admin.database.v1.Backup], if successful.
+ // Cancelling the returned operation will stop the creation and delete the
+ // backup. There can be only one pending backup creation per database. Backup
+ // creation of different databases can run concurrently.
rpc CreateBackup(CreateBackupRequest) returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1/{parent=projects/*/instances/*}/backups"
@@ -212,7 +218,8 @@ service DatabaseAdmin {
};
}
- // Gets metadata on a pending or completed [Backup][google.spanner.admin.database.v1.Backup].
+ // Gets metadata on a pending or completed
+ // [Backup][google.spanner.admin.database.v1.Backup].
rpc GetBackup(GetBackupRequest) returns (Backup) {
option (google.api.http) = {
get: "/v1/{name=projects/*/instances/*/backups/*}"
@@ -220,7 +227,8 @@ service DatabaseAdmin {
option (google.api.method_signature) = "name";
}
- // Updates a pending or completed [Backup][google.spanner.admin.database.v1.Backup].
+ // Updates a pending or completed
+ // [Backup][google.spanner.admin.database.v1.Backup].
rpc UpdateBackup(UpdateBackupRequest) returns (Backup) {
option (google.api.http) = {
patch: "/v1/{backup.name=projects/*/instances/*/backups/*}"
@@ -229,7 +237,8 @@ service DatabaseAdmin {
option (google.api.method_signature) = "backup,update_mask";
}
- // Deletes a pending or completed [Backup][google.spanner.admin.database.v1.Backup].
+ // Deletes a pending or completed
+ // [Backup][google.spanner.admin.database.v1.Backup].
rpc DeleteBackup(DeleteBackupRequest) returns (google.protobuf.Empty) {
option (google.api.http) = {
delete: "/v1/{name=projects/*/instances/*/backups/*}"
@@ -264,7 +273,8 @@ service DatabaseAdmin {
// Once the restore operation completes, a new restore operation can be
// initiated, without waiting for the optimize operation associated with the
// first restore to complete.
- rpc RestoreDatabase(RestoreDatabaseRequest) returns (google.longrunning.Operation) {
+ rpc RestoreDatabase(RestoreDatabaseRequest)
+ returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1/{parent=projects/*/instances/*}/databases:restore"
body: "*"
@@ -284,7 +294,8 @@ service DatabaseAdmin {
// `metadata.type_url` describes the type of the metadata. Operations returned
// include those that have completed/failed/canceled within the last 7 days,
// and pending operations.
- rpc ListDatabaseOperations(ListDatabaseOperationsRequest) returns (ListDatabaseOperationsResponse) {
+ rpc ListDatabaseOperations(ListDatabaseOperationsRequest)
+ returns (ListDatabaseOperationsResponse) {
option (google.api.http) = {
get: "/v1/{parent=projects/*/instances/*}/databaseOperations"
};
@@ -301,7 +312,8 @@ service DatabaseAdmin {
// and pending operations. Operations returned are ordered by
// `operation.metadata.value.progress.start_time` in descending order starting
// from the most recently started operation.
- rpc ListBackupOperations(ListBackupOperationsRequest) returns (ListBackupOperationsResponse) {
+ rpc ListBackupOperations(ListBackupOperationsRequest)
+ returns (ListBackupOperationsResponse) {
option (google.api.http) = {
get: "/v1/{parent=projects/*/instances/*}/backupOperations"
};
@@ -363,25 +375,51 @@ message Database {
State state = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. If exists, the time at which the database creation started.
- google.protobuf.Timestamp create_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+ google.protobuf.Timestamp create_time = 3
+ [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Applicable only for restored databases. Contains information
// about the restore source.
RestoreInfo restore_info = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+ // Output only. For databases that are using customer managed encryption, this
+ // field contains the encryption configuration for the database.
+ // For databases that are using Google default or other types of encryption,
+ // this field is empty.
+ EncryptionConfig encryption_config = 5
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. For databases that are using customer managed encryption, this
+ // field contains the encryption information for the database, such as
+ // encryption state and the Cloud KMS key versions that are in use.
+ //
+ // For databases that are using Google default or other types of encryption,
+ // this field is empty.
+ //
+ // This field is propagated lazily from the backend. There might be a delay
+ // from when a key version is being used and when it appears in this field.
+ repeated EncryptionInfo encryption_info = 8
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
// Output only. The period in which Cloud Spanner retains all versions of data
// for the database. This is the same as the value of version_retention_period
// database option set using
- // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. Defaults to 1 hour,
- // if not set.
- string version_retention_period = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
+ // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
+ // Defaults to 1 hour, if not set.
+ string version_retention_period = 6
+ [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Earliest timestamp at which older versions of the data can be
- // read.
- google.protobuf.Timestamp earliest_version_time = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
+ // read. This value is continuously updated by Cloud Spanner and becomes stale
+ // the moment it is queried. If you are using this value to recover data, make
+ // sure to account for the time from the moment when the value is queried to
+ // the moment when you initiate the recovery.
+ google.protobuf.Timestamp earliest_version_time = 7
+ [(google.api.field_behavior) = OUTPUT_ONLY];
}
-// The request for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
+// The request for
+// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
message ListDatabasesRequest {
// Required. The instance whose databases should be listed.
// Values are of the form `projects//instances/`.
@@ -397,23 +435,26 @@ message ListDatabasesRequest {
int32 page_size = 3;
// If non-empty, `page_token` should contain a
- // [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] from a
- // previous [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
+ // [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
+ // from a previous
+ // [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
string page_token = 4;
}
-// The response for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
+// The response for
+// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
message ListDatabasesResponse {
// Databases that matched the request.
repeated Database databases = 1;
// `next_page_token` can be sent in a subsequent
- // [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] call to fetch more
- // of the matching databases.
+ // [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
+ // call to fetch more of the matching databases.
string next_page_token = 2;
}
-// The request for [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
+// The request for
+// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
message CreateDatabaseRequest {
// Required. The name of the instance that will serve the new database.
// Values are of the form `projects//instances/`.
@@ -436,6 +477,12 @@ message CreateDatabaseRequest {
// statements execute atomically with the creation of the database:
// if there is an error in any statement, the database is not created.
repeated string extra_statements = 3 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. The encryption configuration for the database. If this field is
+ // not specified, Cloud Spanner will encrypt/decrypt all data at rest using
+ // Google default encryption.
+ EncryptionConfig encryption_config = 4
+ [(google.api.field_behavior) = OPTIONAL];
}
// Metadata type for the operation returned by
@@ -443,11 +490,12 @@ message CreateDatabaseRequest {
message CreateDatabaseMetadata {
// The database being created.
string database = 1 [(google.api.resource_reference) = {
- type: "spanner.googleapis.com/Database"
- }];
+ type: "spanner.googleapis.com/Database"
+ }];
}
-// The request for [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
+// The request for
+// [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
message GetDatabaseRequest {
// Required. The name of the requested database. Values are of the form
// `projects//instances//databases/`.
@@ -473,8 +521,8 @@ message GetDatabaseRequest {
// Each batch of statements is assigned a name which can be used with
// the [Operations][google.longrunning.Operations] API to monitor
// progress. See the
-// [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] field for more
-// details.
+// [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id]
+// field for more details.
message UpdateDatabaseDdlRequest {
// Required. The database to update.
string database = 1 [
@@ -494,18 +542,20 @@ message UpdateDatabaseDdlRequest {
//
// Specifying an explicit operation ID simplifies determining
// whether the statements were executed in the event that the
- // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] call is replayed,
- // or the return value is otherwise lost: the [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] and
- // `operation_id` fields can be combined to form the
+ // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
+ // call is replayed, or the return value is otherwise lost: the
+ // [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database]
+ // and `operation_id` fields can be combined to form the
// [name][google.longrunning.Operation.name] of the resulting
- // [longrunning.Operation][google.longrunning.Operation]: `/operations/`.
+ // [longrunning.Operation][google.longrunning.Operation]:
+ // `/operations/`.
//
// `operation_id` should be unique within the database, and must be
// a valid identifier: `[a-z][a-z0-9_]*`. Note that
// automatically-generated operation IDs always begin with an
// underscore. If the named operation already exists,
- // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] returns
- // `ALREADY_EXISTS`.
+ // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
+ // returns `ALREADY_EXISTS`.
string operation_id = 3;
}
@@ -514,8 +564,8 @@ message UpdateDatabaseDdlRequest {
message UpdateDatabaseDdlMetadata {
// The database being modified.
string database = 1 [(google.api.resource_reference) = {
- type: "spanner.googleapis.com/Database"
- }];
+ type: "spanner.googleapis.com/Database"
+ }];
// For an update this list contains all the statements. For an
// individual statement, this list contains only that statement.
@@ -532,7 +582,8 @@ message UpdateDatabaseDdlMetadata {
bool throttled = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
}
-// The request for [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
+// The request for
+// [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
message DropDatabaseRequest {
// Required. The database to be dropped.
string database = 1 [
@@ -543,7 +594,8 @@ message DropDatabaseRequest {
];
}
-// The request for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
+// The request for
+// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
message GetDatabaseDdlRequest {
// Required. The database whose schema we wish to get.
// Values are of the form
@@ -556,7 +608,8 @@ message GetDatabaseDdlRequest {
];
}
-// The response for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
+// The response for
+// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
message GetDatabaseDdlResponse {
// A list of formatted DDL statements defining the schema of the database
// specified in the request.
@@ -589,7 +642,9 @@ message ListDatabaseOperationsRequest {
// * `name` - The name of the long-running operation
// * `done` - False if the operation is in progress, else true.
// * `metadata.@type` - the type of metadata. For example, the type string
- // for [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] is
+ // for
+ // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]
+ // is
// `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`.
// * `metadata.` - any field in metadata.value.
// * `error` - Error associated with the long-running operation.
@@ -609,7 +664,8 @@ message ListDatabaseOperationsRequest {
// `(metadata.name:restored_howl) AND` \
// `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
// `(error:*)` - Return operations where:
- // * The operation's metadata type is [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
+ // * The operation's metadata type is
+ // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
// * The database is restored from a backup.
// * The backup name contains "backup_howl".
// * The restored database's name contains "restored_howl".
@@ -623,8 +679,9 @@ message ListDatabaseOperationsRequest {
// If non-empty, `page_token` should contain a
// [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
- // from a previous [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse] to the
- // same `parent` and with the same `filter`.
+ // from a previous
+ // [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
+ // to the same `parent` and with the same `filter`.
string page_token = 4;
}
@@ -670,9 +727,54 @@ message RestoreDatabaseRequest {
// Name of the backup from which to restore. Values are of the form
// `projects//instances//backups/`.
string backup = 3 [(google.api.resource_reference) = {
- type: "spanner.googleapis.com/Backup"
- }];
+ type: "spanner.googleapis.com/Backup"
+ }];
}
+
+ // Optional. An encryption configuration describing the encryption type and
+ // key resources in Cloud KMS used to encrypt/decrypt the database to restore
+ // to. If this field is not specified, the restored database will use the same
+ // encryption configuration as the backup by default, namely
+ // [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
+ // = `USE_CONFIG_DEFAULT_OR_DATABASE_ENCRYPTION`.
+ RestoreDatabaseEncryptionConfig encryption_config = 4
+ [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Encryption configuration for the restored database.
+message RestoreDatabaseEncryptionConfig {
+ // Encryption types for the database to be restored.
+ enum EncryptionType {
+ // Unspecified. Do not use.
+ ENCRYPTION_TYPE_UNSPECIFIED = 0;
+
+ // This is the default option when
+ // [encryption_config][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig]
+ // is not specified.
+ USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION = 1;
+
+ // Use Google default encryption.
+ GOOGLE_DEFAULT_ENCRYPTION = 2;
+
+  // Use customer managed encryption. If specified, `kms_key_name` must
+  // contain a valid Cloud KMS key.
+ CUSTOMER_MANAGED_ENCRYPTION = 3;
+ }
+
+ // Required. The encryption type of the restored database.
+ EncryptionType encryption_type = 1 [(google.api.field_behavior) = REQUIRED];
+
+ // Optional. The Cloud KMS key that will be used to encrypt/decrypt the
+ // restored database. This field should be set only when
+ // [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
+ // is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
+ // `projects//locations//keyRings//cryptoKeys/`.
+ string kms_key_name = 2 [
+ (google.api.field_behavior) = OPTIONAL,
+ (google.api.resource_reference) = {
+ type: "cloudkms.googleapis.com/CryptoKey"
+ }
+ ];
}
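
A minimal sketch of how this new message is meant to be used from the generated Python client; the project, instance, backup, and key names below are placeholders, not part of this change:

from google.cloud.spanner_admin_database_v1 import (
    DatabaseAdminClient,
    RestoreDatabaseEncryptionConfig,
    RestoreDatabaseRequest,
)

client = DatabaseAdminClient()

# Restore a backup into a new database protected by a customer-managed key.
request = RestoreDatabaseRequest(
    parent="projects/my-project/instances/my-instance",
    database_id="restored-db",
    backup="projects/my-project/instances/my-instance/backups/my-backup",
    encryption_config=RestoreDatabaseEncryptionConfig(
        encryption_type=RestoreDatabaseEncryptionConfig.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION,
        kms_key_name=(
            "projects/my-project/locations/us-central1/"
            "keyRings/my-ring/cryptoKeys/my-key"
        ),
    ),
)
operation = client.restore_database(request=request)
database = operation.result()  # long-running operation; blocks until done
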
// Metadata type for the long-running operation returned by
@@ -680,14 +782,15 @@ message RestoreDatabaseRequest {
message RestoreDatabaseMetadata {
// Name of the database being created and restored to.
string name = 1 [(google.api.resource_reference) = {
- type: "spanner.googleapis.com/Database"
- }];
+ type: "spanner.googleapis.com/Database"
+ }];
// The type of the restore source.
RestoreSourceType source_type = 2;
// Information about the source used to restore the database, as specified by
- // `source` in [RestoreDatabaseRequest][google.spanner.admin.database.v1.RestoreDatabaseRequest].
+ // `source` in
+ // [RestoreDatabaseRequest][google.spanner.admin.database.v1.RestoreDatabaseRequest].
oneof source_info {
// Information about the backup used to restore the database.
BackupInfo backup_info = 3;
@@ -708,7 +811,8 @@ message RestoreDatabaseMetadata {
// operation completed despite cancellation. On successful cancellation,
// the operation is not deleted; instead, it becomes an operation with
// an [Operation.error][google.longrunning.Operation.error] value with a
- // [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to `Code.CANCELLED`.
+ // [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
+ // `Code.CANCELLED`.
google.protobuf.Timestamp cancel_time = 5;
// If exists, the name of the long-running operation that will be used to
@@ -718,10 +822,10 @@ message RestoreDatabaseMetadata {
// `projects//instances//databases//operations/`
// where the is the name of database being created and restored to.
// The metadata type of the long-running operation is
- // [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. This long-running operation will be
- // automatically created by the system after the RestoreDatabase long-running
- // operation completes successfully. This operation will not be created if the
- // restore was not successful.
+ // [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata].
+ // This long-running operation will be automatically created by the system
+ // after the RestoreDatabase long-running operation completes successfully.
+ // This operation will not be created if the restore was not successful.
string optimize_database_operation_name = 6;
}
@@ -732,8 +836,8 @@ message RestoreDatabaseMetadata {
message OptimizeRestoredDatabaseMetadata {
// Name of the restored database being optimized.
string name = 1 [(google.api.resource_reference) = {
- type: "spanner.googleapis.com/Database"
- }];
+ type: "spanner.googleapis.com/Database"
+ }];
// The progress of the post-restore optimizations.
OperationProgress progress = 2;
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py
index f64e8202bf..31b97af061 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py
@@ -33,6 +33,7 @@
from google.cloud.spanner_admin_database_v1.services.database_admin import pagers
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import common
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore
from google.iam.v1 import policy_pb2 as policy # type: ignore
@@ -62,6 +63,12 @@ class DatabaseAdminAsyncClient:
backup_path = staticmethod(DatabaseAdminClient.backup_path)
parse_backup_path = staticmethod(DatabaseAdminClient.parse_backup_path)
+ crypto_key_path = staticmethod(DatabaseAdminClient.crypto_key_path)
+ parse_crypto_key_path = staticmethod(DatabaseAdminClient.parse_crypto_key_path)
+ crypto_key_version_path = staticmethod(DatabaseAdminClient.crypto_key_version_path)
+ parse_crypto_key_version_path = staticmethod(
+ DatabaseAdminClient.parse_crypto_key_version_path
+ )
database_path = staticmethod(DatabaseAdminClient.database_path)
parse_database_path = staticmethod(DatabaseAdminClient.parse_database_path)
instance_path = staticmethod(DatabaseAdminClient.instance_path)
@@ -194,7 +201,7 @@ async def list_databases(
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabasesAsyncPager:
The response for
- [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
+ [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
Iterating over this object will yield results and
resolve additional pages automatically.
@@ -687,7 +694,7 @@ async def get_database_ddl(
Returns:
google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlResponse:
The response for
- [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
+ [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
"""
# Create or coerce a protobuf request object.
@@ -1524,7 +1531,7 @@ async def list_backups(
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupsAsyncPager:
The response for
- [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
+ [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
Iterating over this object will yield results and
resolve additional pages automatically.
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py
index 8deca17c5d..83cfeb248f 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py
@@ -37,6 +37,7 @@
from google.cloud.spanner_admin_database_v1.services.database_admin import pagers
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import common
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore
from google.iam.v1 import policy_pb2 as policy # type: ignore
@@ -185,6 +186,53 @@ def parse_backup_path(path: str) -> Dict[str, str]:
)
return m.groupdict() if m else {}
+ @staticmethod
+ def crypto_key_path(
+ project: str, location: str, key_ring: str, crypto_key: str,
+ ) -> str:
+ """Return a fully-qualified crypto_key string."""
+ return "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format(
+ project=project,
+ location=location,
+ key_ring=key_ring,
+ crypto_key=crypto_key,
+ )
+
+ @staticmethod
+ def parse_crypto_key_path(path: str) -> Dict[str, str]:
+ """Parse a crypto_key path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/keyRings/(?P.+?)/cryptoKeys/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def crypto_key_version_path(
+ project: str,
+ location: str,
+ key_ring: str,
+ crypto_key: str,
+ crypto_key_version: str,
+ ) -> str:
+ """Return a fully-qualified crypto_key_version string."""
+ return "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}".format(
+ project=project,
+ location=location,
+ key_ring=key_ring,
+ crypto_key=crypto_key,
+ crypto_key_version=crypto_key_version,
+ )
+
+ @staticmethod
+ def parse_crypto_key_version_path(path: str) -> Dict[str, str]:
+ """Parse a crypto_key_version path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/keyRings/(?P.+?)/cryptoKeys/(?P.+?)/cryptoKeyVersions/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
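
For illustration, the new helpers round-trip like this (values are hypothetical):

path = DatabaseAdminClient.crypto_key_path(
    "my-project", "us-central1", "my-ring", "my-key"
)
# path == "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"

segments = DatabaseAdminClient.parse_crypto_key_path(path)
# segments == {"project": "my-project", "location": "us-central1",
#              "key_ring": "my-ring", "crypto_key": "my-key"}
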
@staticmethod
def database_path(project: str, instance: str, database: str,) -> str:
"""Return a fully-qualified database string."""
@@ -419,7 +467,7 @@ def list_databases(
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabasesPager:
The response for
- [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
+ [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
Iterating over this object will yield results and
resolve additional pages automatically.
@@ -754,9 +802,8 @@ def update_database_ddl(
if database is not None:
request.database = database
-
- if statements:
- request.statements.extend(statements)
+ if statements is not None:
+ request.statements = statements
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
@@ -885,7 +932,7 @@ def get_database_ddl(
Returns:
google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlResponse:
The response for
- [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
+ [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
"""
# Create or coerce a protobuf request object.
@@ -1041,7 +1088,11 @@ def set_iam_policy(
request = iam_policy.SetIamPolicyRequest(**request)
elif not request:
- request = iam_policy.SetIamPolicyRequest(resource=resource,)
+ # Null request, just make one.
+ request = iam_policy.SetIamPolicyRequest()
+
+ if resource is not None:
+ request.resource = resource
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
@@ -1174,7 +1225,11 @@ def get_iam_policy(
request = iam_policy.GetIamPolicyRequest(**request)
elif not request:
- request = iam_policy.GetIamPolicyRequest(resource=resource,)
+ # Null request, just make one.
+ request = iam_policy.GetIamPolicyRequest()
+
+ if resource is not None:
+ request.resource = resource
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
@@ -1262,9 +1317,14 @@ def test_iam_permissions(
request = iam_policy.TestIamPermissionsRequest(**request)
elif not request:
- request = iam_policy.TestIamPermissionsRequest(
- resource=resource, permissions=permissions,
- )
+ # Null request, just make one.
+ request = iam_policy.TestIamPermissionsRequest()
+
+ if resource is not None:
+ request.resource = resource
+
+ if permissions:
+ request.permissions.extend(permissions)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
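
The effect of the rewritten coercion, sketched with assumed resource and permission names, is that the flattened keyword arguments keep working when no request object is passed:

from google.cloud.spanner_admin_database_v1 import DatabaseAdminClient

client = DatabaseAdminClient()
response = client.test_iam_permissions(
    resource="projects/my-project/instances/my-instance/databases/my-db",
    permissions=["spanner.databases.select"],  # hypothetical permission name
)
print(response.permissions)  # the subset the caller actually holds
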
@@ -1675,7 +1735,7 @@ def list_backups(
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupsPager:
The response for
- [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
+ [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
Iterating over this object will yield results and
resolve additional pages automatically.
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py b/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py
index 4e5ea62e3f..933ca91c5a 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py
@@ -15,7 +15,16 @@
# limitations under the License.
#
-from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple
+from typing import (
+ Any,
+ AsyncIterable,
+ Awaitable,
+ Callable,
+ Iterable,
+ Sequence,
+ Tuple,
+ Optional,
+)
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
diff --git a/google/cloud/spanner_admin_database_v1/types/__init__.py b/google/cloud/spanner_admin_database_v1/types/__init__.py
index 79b682aab9..9749add377 100644
--- a/google/cloud/spanner_admin_database_v1/types/__init__.py
+++ b/google/cloud/spanner_admin_database_v1/types/__init__.py
@@ -15,7 +15,11 @@
# limitations under the License.
#
-from .common import OperationProgress
+from .common import (
+ OperationProgress,
+ EncryptionConfig,
+ EncryptionInfo,
+)
from .backup import (
Backup,
CreateBackupRequest,
@@ -28,6 +32,7 @@
ListBackupOperationsRequest,
ListBackupOperationsResponse,
BackupInfo,
+ CreateBackupEncryptionConfig,
)
from .spanner_database_admin import (
RestoreInfo,
@@ -45,6 +50,7 @@
ListDatabaseOperationsRequest,
ListDatabaseOperationsResponse,
RestoreDatabaseRequest,
+ RestoreDatabaseEncryptionConfig,
RestoreDatabaseMetadata,
OptimizeRestoredDatabaseMetadata,
RestoreSourceType,
@@ -52,6 +58,8 @@
__all__ = (
"OperationProgress",
+ "EncryptionConfig",
+ "EncryptionInfo",
"Backup",
"CreateBackupRequest",
"CreateBackupMetadata",
@@ -63,6 +71,7 @@
"ListBackupOperationsRequest",
"ListBackupOperationsResponse",
"BackupInfo",
+ "CreateBackupEncryptionConfig",
"RestoreInfo",
"Database",
"ListDatabasesRequest",
@@ -78,6 +87,7 @@
"ListDatabaseOperationsRequest",
"ListDatabaseOperationsResponse",
"RestoreDatabaseRequest",
+ "RestoreDatabaseEncryptionConfig",
"RestoreDatabaseMetadata",
"OptimizeRestoredDatabaseMetadata",
"RestoreSourceType",
diff --git a/google/cloud/spanner_admin_database_v1/types/backup.py b/google/cloud/spanner_admin_database_v1/types/backup.py
index 6062cc5444..7d95a007f4 100644
--- a/google/cloud/spanner_admin_database_v1/types/backup.py
+++ b/google/cloud/spanner_admin_database_v1/types/backup.py
@@ -38,6 +38,7 @@
"ListBackupOperationsRequest",
"ListBackupOperationsResponse",
"BackupInfo",
+ "CreateBackupEncryptionConfig",
},
)
@@ -103,6 +104,9 @@ class Backup(proto.Message):
from being deleted. When a restored database from the backup
enters the ``READY`` state, the reference to the backup is
removed.
+ encryption_info (google.cloud.spanner_admin_database_v1.types.EncryptionInfo):
+ Output only. The encryption information for
+ the backup.
"""
class State(proto.Enum):
@@ -127,6 +131,10 @@ class State(proto.Enum):
referencing_databases = proto.RepeatedField(proto.STRING, number=7)
+ encryption_info = proto.Field(
+ proto.MESSAGE, number=8, message=common.EncryptionInfo,
+ )
+
class CreateBackupRequest(proto.Message):
r"""The request for
@@ -147,6 +155,13 @@ class CreateBackupRequest(proto.Message):
``projects//instances//backups/``.
backup (google.cloud.spanner_admin_database_v1.types.Backup):
Required. The backup to create.
+ encryption_config (google.cloud.spanner_admin_database_v1.types.CreateBackupEncryptionConfig):
+ Optional. The encryption configuration used to encrypt the
+ backup. If this field is not specified, the backup will use
+ the same encryption configuration as the database by
+ default, namely
+ [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
+ = ``USE_DATABASE_ENCRYPTION``.
"""
parent = proto.Field(proto.STRING, number=1)
@@ -155,6 +170,10 @@ class CreateBackupRequest(proto.Message):
backup = proto.Field(proto.MESSAGE, number=3, message="Backup",)
+ encryption_config = proto.Field(
+ proto.MESSAGE, number=4, message="CreateBackupEncryptionConfig",
+ )
+
class CreateBackupMetadata(proto.Message):
r"""Metadata type for the operation returned by
@@ -181,10 +200,10 @@ class CreateBackupMetadata(proto.Message):
or other methods to check whether the cancellation succeeded
or whether the operation completed despite cancellation. On
successful cancellation, the operation is not deleted;
- instead, it becomes an operation with an [Operation.error][]
- value with a
- [google.rpc.Status.code][google.rpc.Status.code] of 1,
- corresponding to ``Code.CANCELLED``.
+ instead, it becomes an operation with an
+ [Operation.error][google.longrunning.Operation.error] value
+ with a [google.rpc.Status.code][google.rpc.Status.code] of
+ 1, corresponding to ``Code.CANCELLED``.
"""
name = proto.Field(proto.STRING, number=1)
@@ -278,6 +297,8 @@ class ListBackupsRequest(proto.Message):
YYYY-MM-DDTHH:MM:SSZ)
- ``expire_time`` (and values are of the format
YYYY-MM-DDTHH:MM:SSZ)
+ - ``version_time`` (and values are of the format
+ YYYY-MM-DDTHH:MM:SSZ)
- ``size_bytes``
You can combine multiple expressions by enclosing each
@@ -493,4 +514,30 @@ class BackupInfo(proto.Message):
source_database = proto.Field(proto.STRING, number=3)
+class CreateBackupEncryptionConfig(proto.Message):
+ r"""Encryption configuration for the backup to create.
+
+ Attributes:
+ encryption_type (google.cloud.spanner_admin_database_v1.types.CreateBackupEncryptionConfig.EncryptionType):
+ Required. The encryption type of the backup.
+ kms_key_name (str):
+ Optional. The Cloud KMS key that will be used to protect the
+ backup. This field should be set only when
+ [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
+ is ``CUSTOMER_MANAGED_ENCRYPTION``. Values are of the form
+ ``projects//locations//keyRings//cryptoKeys/``.
+ """
+
+ class EncryptionType(proto.Enum):
+ r"""Encryption types for the backup."""
+ ENCRYPTION_TYPE_UNSPECIFIED = 0
+ USE_DATABASE_ENCRYPTION = 1
+ GOOGLE_DEFAULT_ENCRYPTION = 2
+ CUSTOMER_MANAGED_ENCRYPTION = 3
+
+ encryption_type = proto.Field(proto.ENUM, number=1, enum=EncryptionType,)
+
+ kms_key_name = proto.Field(proto.STRING, number=2)
+
+
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/spanner_admin_database_v1/types/common.py b/google/cloud/spanner_admin_database_v1/types/common.py
index c43dbdb580..2f552d19fd 100644
--- a/google/cloud/spanner_admin_database_v1/types/common.py
+++ b/google/cloud/spanner_admin_database_v1/types/common.py
@@ -19,10 +19,12 @@
from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.rpc import status_pb2 as status # type: ignore
__protobuf__ = proto.module(
- package="google.spanner.admin.database.v1", manifest={"OperationProgress",},
+ package="google.spanner.admin.database.v1",
+ manifest={"OperationProgress", "EncryptionConfig", "EncryptionInfo",},
)
@@ -48,4 +50,47 @@ class OperationProgress(proto.Message):
end_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,)
+class EncryptionConfig(proto.Message):
+ r"""Encryption configuration for a Cloud Spanner database.
+
+ Attributes:
+ kms_key_name (str):
+ The Cloud KMS key to be used for encrypting and decrypting
+ the database. Values are of the form
+ ``projects//locations//keyRings//cryptoKeys/``.
+ """
+
+ kms_key_name = proto.Field(proto.STRING, number=2)
+
+
+class EncryptionInfo(proto.Message):
+ r"""Encryption information for a Cloud Spanner database or
+ backup.
+
+ Attributes:
+ encryption_type (google.cloud.spanner_admin_database_v1.types.EncryptionInfo.Type):
+ Output only. The type of encryption.
+ encryption_status (google.rpc.status_pb2.Status):
+ Output only. If present, the status of a
+ recent encrypt/decrypt call on underlying data
+ for this database or backup. Regardless of
+ status, data is always encrypted at rest.
+ kms_key_version (str):
+ Output only. A Cloud KMS key version that is
+ being used to protect the database or backup.
+ """
+
+ class Type(proto.Enum):
+ r"""Possible encryption types."""
+ TYPE_UNSPECIFIED = 0
+ GOOGLE_DEFAULT_ENCRYPTION = 1
+ CUSTOMER_MANAGED_ENCRYPTION = 2
+
+ encryption_type = proto.Field(proto.ENUM, number=3, enum=Type,)
+
+ encryption_status = proto.Field(proto.MESSAGE, number=4, message=status.Status,)
+
+ kms_key_version = proto.Field(proto.STRING, number=2)
+
+
__all__ = tuple(sorted(__protobuf__.manifest))
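
A short sketch of reading these new fields back, assuming a CMEK-enabled backup object that has already been reloaded via the admin API:

info = backup.encryption_info  # an EncryptionInfo message, per the type above
if info:
    print(info.encryption_type)  # e.g. Type.CUSTOMER_MANAGED_ENCRYPTION
    print(info.kms_key_version)  # Cloud KMS key version protecting the data
    if info.encryption_status.code:
        # status of a recent encrypt/decrypt call on the underlying data
        print(info.encryption_status.message)
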
diff --git a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py
index fce6a20e31..c7309dbbde 100644
--- a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py
+++ b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py
@@ -43,6 +43,7 @@
"ListDatabaseOperationsRequest",
"ListDatabaseOperationsResponse",
"RestoreDatabaseRequest",
+ "RestoreDatabaseEncryptionConfig",
"RestoreDatabaseMetadata",
"OptimizeRestoredDatabaseMetadata",
},
@@ -92,6 +93,25 @@ class Database(proto.Message):
Output only. Applicable only for restored
databases. Contains information about the
restore source.
+ encryption_config (google.cloud.spanner_admin_database_v1.types.EncryptionConfig):
+ Output only. For databases that are using
+ customer managed encryption, this field contains
+ the encryption configuration for the database.
+ For databases that are using Google default or
+ other types of encryption, this field is empty.
+ encryption_info (Sequence[google.cloud.spanner_admin_database_v1.types.EncryptionInfo]):
+ Output only. For databases that are using
+ customer managed encryption, this field contains
+ the encryption information for the database,
+ such as encryption state and the Cloud KMS key
+ versions that are in use.
+ For databases that are using Google default or
+ other types of encryption, this field is empty.
+
+ This field is propagated lazily from the
+ backend. There might be a delay from when a key
+ version is being used and when it appears in
+ this field.
version_retention_period (str):
Output only. The period in which Cloud Spanner retains all
versions of data for the database. This is the same as the
@@ -100,7 +120,13 @@ class Database(proto.Message):
Defaults to 1 hour, if not set.
earliest_version_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Earliest timestamp at which
- older versions of the data can be read.
+ older versions of the data can be read. This
+ value is continuously updated by Cloud Spanner
+ and becomes stale the moment it is queried. If
+ you are using this value to recover data, make
+ sure to account for the time from the moment
+ when the value is queried to the moment when you
+ initiate the recovery.
"""
class State(proto.Enum):
@@ -118,6 +144,14 @@ class State(proto.Enum):
restore_info = proto.Field(proto.MESSAGE, number=4, message="RestoreInfo",)
+ encryption_config = proto.Field(
+ proto.MESSAGE, number=5, message=common.EncryptionConfig,
+ )
+
+ encryption_info = proto.RepeatedField(
+ proto.MESSAGE, number=8, message=common.EncryptionInfo,
+ )
+
version_retention_period = proto.Field(proto.STRING, number=6)
earliest_version_time = proto.Field(
@@ -197,6 +231,11 @@ class CreateDatabaseRequest(proto.Message):
statements execute atomically with the creation
of the database: if there is an error in any
statement, the database is not created.
+ encryption_config (google.cloud.spanner_admin_database_v1.types.EncryptionConfig):
+ Optional. The encryption configuration for
+ the database. If this field is not specified,
+ Cloud Spanner will encrypt/decrypt all data at
+ rest using Google default encryption.
"""
parent = proto.Field(proto.STRING, number=1)
@@ -205,6 +244,10 @@ class CreateDatabaseRequest(proto.Message):
extra_statements = proto.RepeatedField(proto.STRING, number=3)
+ encryption_config = proto.Field(
+ proto.MESSAGE, number=4, message=common.EncryptionConfig,
+ )
+
class CreateDatabaseMetadata(proto.Message):
r"""Metadata type for the operation returned by
@@ -490,6 +533,14 @@ class RestoreDatabaseRequest(proto.Message):
Name of the backup from which to restore. Values are of the
form
``projects//instances//backups/``.
+ encryption_config (google.cloud.spanner_admin_database_v1.types.RestoreDatabaseEncryptionConfig):
+ Optional. An encryption configuration describing the
+ encryption type and key resources in Cloud KMS used to
+ encrypt/decrypt the database to restore to. If this field is
+ not specified, the restored database will use the same
+ encryption configuration as the backup by default, namely
+ [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
+ = ``USE_CONFIG_DEFAULT_OR_DATABASE_ENCRYPTION``.
"""
parent = proto.Field(proto.STRING, number=1)
@@ -498,6 +549,38 @@ class RestoreDatabaseRequest(proto.Message):
backup = proto.Field(proto.STRING, number=3, oneof="source")
+ encryption_config = proto.Field(
+ proto.MESSAGE, number=4, message="RestoreDatabaseEncryptionConfig",
+ )
+
+
+class RestoreDatabaseEncryptionConfig(proto.Message):
+ r"""Encryption configuration for the restored database.
+
+ Attributes:
+ encryption_type (google.cloud.spanner_admin_database_v1.types.RestoreDatabaseEncryptionConfig.EncryptionType):
+ Required. The encryption type of the restored
+ database.
+ kms_key_name (str):
+ Optional. The Cloud KMS key that will be used to
+ encrypt/decrypt the restored database. This field should be
+ set only when
+ [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
+ is ``CUSTOMER_MANAGED_ENCRYPTION``. Values are of the form
+ ``projects//locations//keyRings//cryptoKeys/``.
+ """
+
+ class EncryptionType(proto.Enum):
+ r"""Encryption types for the database to be restored."""
+ ENCRYPTION_TYPE_UNSPECIFIED = 0
+ USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION = 1
+ GOOGLE_DEFAULT_ENCRYPTION = 2
+ CUSTOMER_MANAGED_ENCRYPTION = 3
+
+ encryption_type = proto.Field(proto.ENUM, number=1, enum=EncryptionType,)
+
+ kms_key_name = proto.Field(proto.STRING, number=2)
+
class RestoreDatabaseMetadata(proto.Message):
r"""Metadata type for the long-running operation returned by
diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py
index 369d9fcced..2dc7b8e6c3 100644
--- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py
+++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py
@@ -1185,7 +1185,11 @@ def set_iam_policy(
request = iam_policy.SetIamPolicyRequest(**request)
elif not request:
- request = iam_policy.SetIamPolicyRequest(resource=resource,)
+ # Null request, just make one.
+ request = iam_policy.SetIamPolicyRequest()
+
+ if resource is not None:
+ request.resource = resource
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
@@ -1314,7 +1318,11 @@ def get_iam_policy(
request = iam_policy.GetIamPolicyRequest(**request)
elif not request:
- request = iam_policy.GetIamPolicyRequest(resource=resource,)
+ # Null request, just make one.
+ request = iam_policy.GetIamPolicyRequest()
+
+ if resource is not None:
+ request.resource = resource
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
@@ -1399,9 +1407,14 @@ def test_iam_permissions(
request = iam_policy.TestIamPermissionsRequest(**request)
elif not request:
- request = iam_policy.TestIamPermissionsRequest(
- resource=resource, permissions=permissions,
- )
+ # Null request, just make one.
+ request = iam_policy.TestIamPermissionsRequest()
+
+ if resource is not None:
+ request.resource = resource
+
+ if permissions:
+ request.permissions.extend(permissions)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/pagers.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/pagers.py
index 85e1823da5..1b9404231d 100644
--- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/pagers.py
+++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/pagers.py
@@ -15,7 +15,16 @@
# limitations under the License.
#
-from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple
+from typing import (
+ Any,
+ AsyncIterable,
+ Awaitable,
+ Callable,
+ Iterable,
+ Sequence,
+ Tuple,
+ Optional,
+)
from google.cloud.spanner_admin_instance_v1.types import spanner_instance_admin
diff --git a/google/cloud/spanner_dbapi/connection.py b/google/cloud/spanner_dbapi/connection.py
index 6438605d3b..926408c928 100644
--- a/google/cloud/spanner_dbapi/connection.py
+++ b/google/cloud/spanner_dbapi/connection.py
@@ -243,7 +243,10 @@ def commit(self):
"""
if self._autocommit:
warnings.warn(AUTOCOMMIT_MODE_WARNING, UserWarning, stacklevel=2)
- elif self.inside_transaction:
+ return
+
+ self.run_prior_DDL_statements()
+ if self.inside_transaction:
try:
self._transaction.commit()
self._release_session()
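
Behaviorally, this means buffered DDL is now flushed even when commit() is called outside autocommit mode; a sketch, with connection setup elided:

cursor = conn.cursor()
cursor.execute(
    "CREATE TABLE Singers (SingerId INT64 NOT NULL) PRIMARY KEY (SingerId)"
)  # DDL is buffered while autocommit is off
conn.commit()  # run_prior_DDL_statements() executes the DDL before committing
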
@@ -290,6 +293,10 @@ def run_statement(self, statement, retried=False):
:type statement: :class:`dict`
:param statement: SQL statement to execute.
+ :type retried: bool
+ :param retried: (Optional) Whether this statement is being re-executed
+ as part of a transaction retry. Defaults to False.
+
+
:rtype: :class:`google.cloud.spanner_v1.streamed.StreamedResultSet`,
:class:`google.cloud.spanner_dbapi.checksum.ResultsChecksum`
:returns: Streamed result set of the statement and a
@@ -353,10 +360,13 @@ def connect(
instances, tables and data. If not provided, will
attempt to determine from the environment.
- :type credentials: :class:`~google.auth.credentials.Credentials`
+ :type credentials: Union[:class:`~google.auth.credentials.Credentials`, str]
:param credentials: (Optional) The authorization credentials to attach to
requests. These credentials identify this application
- to the service. If none are specified, the client will
+ to the service. These credentials may be specified as
+ a file path indicating where to retrieve the service
+ account JSON for the credentials to connect to
+ Cloud Spanner. If none are specified, the client will
attempt to ascertain the credentials from the
environment.
@@ -380,9 +390,14 @@ def connect(
user_agent=user_agent or DEFAULT_USER_AGENT, python_version=PY_VERSION
)
- client = spanner.Client(
- project=project, credentials=credentials, client_info=client_info
- )
+ if isinstance(credentials, str):
+ client = spanner.Client.from_service_account_json(
+ credentials, project=project, client_info=client_info
+ )
+ else:
+ client = spanner.Client(
+ project=project, credentials=credentials, client_info=client_info
+ )
instance = client.instance(instance_id)
if not instance.exists():
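
A sketch of the new string form (paths and IDs below are placeholders):

from google.cloud.spanner_dbapi import connect

conn = connect(
    "my-instance",
    "my-database",
    project="my-project",
    credentials="/path/to/service-account.json",  # str triggers from_service_account_json
)
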
diff --git a/google/cloud/spanner_dbapi/cursor.py b/google/cloud/spanner_dbapi/cursor.py
index dd097d5fc5..a28879faba 100644
--- a/google/cloud/spanner_dbapi/cursor.py
+++ b/google/cloud/spanner_dbapi/cursor.py
@@ -138,17 +138,10 @@ def callproc(self, procname, args=None):
self._raise_if_closed()
def close(self):
- """Prepare and execute a Spanner database operation.
-
- :type sql: str
- :param sql: A SQL query statement.
-
- :type args: list
- :param args: Additional parameters to supplement the SQL query.
- """
+ """Closes this cursor."""
self._is_closed = True
- def _do_execute_update(self, transaction, sql, params, param_types=None):
+ def _do_execute_update(self, transaction, sql, params):
sql = parse_utils.ensure_where_clause(sql)
sql, params = parse_utils.sql_pyformat_args_to_spanner(sql, params)
@@ -181,7 +174,12 @@ def execute(self, sql, args=None):
try:
classification = parse_utils.classify_stmt(sql)
if classification == parse_utils.STMT_DDL:
- self.connection._ddl_statements.append(sql)
+ for ddl in sql.split(";"):
+ ddl = ddl.strip()
+ if ddl:
+ self.connection._ddl_statements.append(ddl)
+ if self.connection.autocommit:
+ self.connection.run_prior_DDL_statements()
return
# For every other operation, we've got to ensure that
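
With this change, a single execute() call may carry several semicolon-separated DDL statements, and in autocommit mode they run immediately; a sketch (connection setup elided):

cursor = conn.cursor()
conn.autocommit = True
cursor.execute(
    "CREATE TABLE Albums (AlbumId INT64 NOT NULL) PRIMARY KEY (AlbumId);"
    "CREATE INDEX AlbumsIdx ON Albums (AlbumId)"
)  # split on ';', each statement buffered, then run_prior_DDL_statements()
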
diff --git a/google/cloud/spanner_dbapi/parse_utils.py b/google/cloud/spanner_dbapi/parse_utils.py
index f76689fdf2..082074251c 100644
--- a/google/cloud/spanner_dbapi/parse_utils.py
+++ b/google/cloud/spanner_dbapi/parse_utils.py
@@ -237,10 +237,10 @@ def parse_insert(insert_sql, params):
Params: ['a', 'b', 'c', 'd']
it produces:
{
- 'homogenous': True,
- 'table': 'T',
- 'columns': ['f1', 'f2'],
- 'values': [('a', 'b',), ('c', 'd',)],
+ 'sql_params_list': [
+ ('INSERT INTO T (f1, f2) VALUES (%s, %s)', ('a', 'b')),
+ ('INSERT INTO T (f1, f2) VALUES (%s, %s)', ('c', 'd'))
+ ],
}
Case d)
@@ -249,7 +249,7 @@ def parse_insert(insert_sql, params):
it produces:
{
'sql_params_list': [
- ('INSERT INTO T (f1, f2) VALUES (%s, LOWER(%s))', ('a', 'b',))
+ ('INSERT INTO T (f1, f2) VALUES (%s, LOWER(%s))', ('a', 'b',)),
('INSERT INTO T (f1, f2) VALUES (UPPER(%s), %s)', ('c', 'd',))
],
}
@@ -306,15 +306,19 @@ def parse_insert(insert_sql, params):
# Case c)
columns = [mi.strip(" `") for mi in match.group("columns").split(",")]
+ sql_params_list = []
+ insert_sql_preamble = "INSERT INTO %s (%s) VALUES %s" % (
+ match.group("table_name"),
+ match.group("columns"),
+ values.argv[0],
+ )
values_pyformat = [str(arg) for arg in values.argv]
rows_list = rows_for_insert_or_update(columns, params, values_pyformat)
+ insert_sql_preamble = sanitize_literals_for_upload(insert_sql_preamble)
+ for row in rows_list:
+ sql_params_list.append((insert_sql_preamble, row))
- return {
- "homogenous": True,
- "table": match.group("table_name"),
- "columns": columns,
- "values": rows_list,
- }
+ return {"sql_params_list": sql_params_list}
# Case d)
# insert_sql is of the form:
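
The new Case c) return shape, mirrored from the docstring above (parse_insert is an internal helper, so this is illustrative only):

from google.cloud.spanner_dbapi.parse_utils import parse_insert

result = parse_insert(
    "INSERT INTO T (f1, f2) VALUES (%s, %s)", ["a", "b", "c", "d"]
)
assert result == {
    "sql_params_list": [
        ("INSERT INTO T (f1, f2) VALUES (%s, %s)", ("a", "b")),
        ("INSERT INTO T (f1, f2) VALUES (%s, %s)", ("c", "d")),
    ]
}
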
diff --git a/google/cloud/spanner_v1/_helpers.py b/google/cloud/spanner_v1/_helpers.py
index 0f56431cb3..bac1f68edb 100644
--- a/google/cloud/spanner_v1/_helpers.py
+++ b/google/cloud/spanner_v1/_helpers.py
@@ -53,19 +53,19 @@ def _merge_query_options(base, merge):
"""Merge higher precedence QueryOptions with current QueryOptions.
:type base:
- :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryOptions`
+ :class:`~google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryOptions`
or :class:`dict` or None
:param base: The current QueryOptions that is intended for use.
:type merge:
- :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryOptions`
+ :class:`~google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryOptions`
or :class:`dict` or None
:param merge:
The QueryOptions that have a higher priority than base. These options
should overwrite the fields in base.
:rtype:
- :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryOptions`
+ :class:`~google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryOptions`
or None
:returns:
QueryOptions object formed by merging the two given QueryOptions.
@@ -167,7 +167,7 @@ def _parse_value_pb(value_pb, field_type):
:type value_pb: :class:`~google.protobuf.struct_pb2.Value`
:param value_pb: protobuf to convert
- :type field_type: :class:`~google.cloud.spanner_v1.Type`
+ :type field_type: :class:`~google.cloud.spanner_v1.types.Type`
:param field_type: type code for the value
:rtype: varies on field_type
@@ -220,7 +220,7 @@ def _parse_list_value_pbs(rows, row_type):
:type rows: list of :class:`~google.protobuf.struct_pb2.ListValue`
:param rows: row data returned from a read/query
- :type row_type: :class:`~google.cloud.spanner_v1.StructType`
+ :type row_type: :class:`~google.cloud.spanner_v1.types.StructType`
:param row_type: row schema specification
:rtype: list of list of cell data
diff --git a/google/cloud/spanner_v1/backup.py b/google/cloud/spanner_v1/backup.py
index 2277a33fce..dba7ba1fcb 100644
--- a/google/cloud/spanner_v1/backup.py
+++ b/google/cloud/spanner_v1/backup.py
@@ -19,6 +19,8 @@
from google.cloud.exceptions import NotFound
from google.cloud.spanner_admin_database_v1 import Backup as BackupPB
+from google.cloud.spanner_admin_database_v1 import CreateBackupEncryptionConfig
+from google.cloud.spanner_admin_database_v1 import CreateBackupRequest
from google.cloud.spanner_v1._helpers import _metadata_with_prefix
_BACKUP_NAME_RE = re.compile(
@@ -57,10 +59,24 @@ class Backup(object):
the externally consistent copy of the database. If
not present, it is the same as the `create_time` of
the backup.
+
+ :type encryption_config:
+ :class:`~google.cloud.spanner_admin_database_v1.types.CreateBackupEncryptionConfig`
+ or :class:`dict`
+ :param encryption_config:
+ (Optional) Encryption configuration for the backup.
+ If a dict is provided, it must be of the same form as the protobuf
+ message :class:`~google.cloud.spanner_admin_database_v1.types.CreateBackupEncryptionConfig`
"""
def __init__(
- self, backup_id, instance, database="", expire_time=None, version_time=None
+ self,
+ backup_id,
+ instance,
+ database="",
+ expire_time=None,
+ version_time=None,
+ encryption_config=None,
):
self.backup_id = backup_id
self._instance = instance
@@ -71,6 +87,11 @@ def __init__(
self._size_bytes = None
self._state = None
self._referencing_databases = None
+ self._encryption_info = None
+ if isinstance(encryption_config, dict):
+ self._encryption_config = CreateBackupEncryptionConfig(**encryption_config)
+ else:
+ self._encryption_config = encryption_config
@property
def name(self):
@@ -141,7 +162,7 @@ def size_bytes(self):
def state(self):
"""State of this backup.
- :rtype: :class:`~google.cloud.spanner_admin_database_v1.Backup.State`
+ :rtype: :class:`~google.cloud.spanner_admin_database_v1.types.Backup.State`
:returns: an enum describing the state of the backup
"""
return self._state
@@ -156,11 +177,19 @@ def referencing_databases(self):
"""
return self._referencing_databases
+ @property
+ def encryption_info(self):
+ """Encryption info for this backup.
+ :rtype: :class:`~google.cloud.spanner_admin_database_v1.types.EncryptionInfo`
+ :returns: a class representing the encryption info
+ """
+ return self._encryption_info
+
@classmethod
def from_pb(cls, backup_pb, instance):
"""Create an instance of this class from a protobuf message.
- :type backup_pb: :class:`~google.spanner.admin.database.v1.Backup`
+ :type backup_pb: :class:`~google.cloud.spanner_admin_database_v1.types.Backup`
:param backup_pb: A backup protobuf object.
:type instance: :class:`~google.cloud.spanner_v1.instance.Instance`
@@ -207,6 +236,13 @@ def create(self):
raise ValueError("expire_time not set")
if not self._database:
raise ValueError("database not set")
+ if (
+ self._encryption_config
+ and self._encryption_config.kms_key_name
+ and self._encryption_config.encryption_type
+ != CreateBackupEncryptionConfig.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION
+ ):
+ raise ValueError("kms_key_name only used with CUSTOMER_MANAGED_ENCRYPTION")
api = self._instance._client.database_admin_api
metadata = _metadata_with_prefix(self.name)
backup = BackupPB(
@@ -215,12 +251,14 @@ def create(self):
version_time=self.version_time,
)
- future = api.create_backup(
+ request = CreateBackupRequest(
parent=self._instance.name,
backup_id=self.backup_id,
backup=backup,
- metadata=metadata,
+ encryption_config=self._encryption_config,
)
+
+ future = api.create_backup(request=request, metadata=metadata,)
return future
def exists(self):
@@ -255,6 +293,7 @@ def reload(self):
self._size_bytes = pb.size_bytes
self._state = BackupPB.State(pb.state)
self._referencing_databases = pb.referencing_databases
+ self._encryption_info = pb.encryption_info
def update_expire_time(self, new_expire_time):
"""Update the expire time of this backup.
diff --git a/google/cloud/spanner_v1/batch.py b/google/cloud/spanner_v1/batch.py
index c04fa6e5a4..9a79507886 100644
--- a/google/cloud/spanner_v1/batch.py
+++ b/google/cloud/spanner_v1/batch.py
@@ -179,7 +179,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
def _make_write_pb(table, columns, values):
- """Helper for :meth:`Batch.insert` et aliae.
+ """Helper for :meth:`Batch.insert` et al.
:type table: str
:param table: Name of the table to be modified.
@@ -190,7 +190,7 @@ def _make_write_pb(table, columns, values):
:type values: list of lists
:param values: Values to be modified.
- :rtype: :class:`google.cloud.spanner_v1.Mutation.Write`
+ :rtype: :class:`google.cloud.spanner_v1.types.Mutation.Write`
:returns: Write protobuf
"""
return Mutation.Write(
diff --git a/google/cloud/spanner_v1/client.py b/google/cloud/spanner_v1/client.py
index f4cd6ef910..1b447cbfa8 100644
--- a/google/cloud/spanner_v1/client.py
+++ b/google/cloud/spanner_v1/client.py
@@ -108,12 +108,12 @@ class Client(ClientWithProject):
on the client. API Endpoint should be set through client_options.
:type query_options:
- :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryOptions`
+ :class:`~google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryOptions`
or :class:`dict`
:param query_options:
(Optional) Query optimizer configuration to use for the given query.
If a dict is provided, it must be of the same form as the protobuf
- message :class:`~google.cloud.spanner_v1.QueryOptions`
+ message :class:`~google.cloud.spanner_v1.types.QueryOptions`
:raises: :class:`ValueError ` if both ``read_only``
and ``admin`` are :data:`True`
@@ -348,7 +348,7 @@ def list_instances(self, filter_="", page_size=None):
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns:
- Iterator of :class:`~google.cloud.spanner_v1.instance.Instance`
+ Iterator of :class:`~google.cloud.spanner_admin_instance_v1.types.Instance`
resources within the client's project.
"""
metadata = _metadata_with_prefix(self.project_name)
diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py
index 92c797b987..1e76bf218f 100644
--- a/google/cloud/spanner_v1/database.py
+++ b/google/cloud/spanner_v1/database.py
@@ -26,6 +26,7 @@
from google.api_core.retry import if_exception_type
from google.cloud.exceptions import NotFound
from google.api_core.exceptions import Aborted
+from google.api_core import gapic_v1
import six
# pylint: disable=ungrouped-imports
@@ -47,6 +48,9 @@
SpannerGrpcTransport,
)
from google.cloud.spanner_admin_database_v1 import CreateDatabaseRequest
+from google.cloud.spanner_admin_database_v1 import EncryptionConfig
+from google.cloud.spanner_admin_database_v1 import RestoreDatabaseEncryptionConfig
+from google.cloud.spanner_admin_database_v1 import RestoreDatabaseRequest
from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest
from google.cloud.spanner_v1 import (
ExecuteSqlRequest,
@@ -103,17 +107,32 @@ class Database(object):
passed, the database will construct an instance of
:class:`~google.cloud.spanner_v1.pool.BurstyPool`.
- :type logger: `logging.Logger`
+ :type logger: :class:`logging.Logger`
:param logger: (Optional) a custom logger that is used if `log_commit_stats`
is `True` to log commit statistics. If not passed, a logger
will be created when needed that will log the commit statistics
to stdout.
+ :type encryption_config:
+ :class:`~google.cloud.spanner_admin_database_v1.types.EncryptionConfig`
+ or :class:`~google.cloud.spanner_admin_database_v1.types.RestoreDatabaseEncryptionConfig`
+ or :class:`dict`
+ :param encryption_config:
+ (Optional) Encryption configuration for the database.
+ If a dict is provided, it must be of the same form as either of the protobuf
+ messages :class:`~google.cloud.spanner_admin_database_v1.types.EncryptionConfig`
+ or :class:`~google.cloud.spanner_admin_database_v1.types.RestoreDatabaseEncryptionConfig`
"""
_spanner_api = None
def __init__(
- self, database_id, instance, ddl_statements=(), pool=None, logger=None
+ self,
+ database_id,
+ instance,
+ ddl_statements=(),
+ pool=None,
+ logger=None,
+ encryption_config=None,
):
self.database_id = database_id
self._instance = instance
@@ -124,8 +143,10 @@ def __init__(
self._restore_info = None
self._version_retention_period = None
self._earliest_version_time = None
+ self._encryption_info = None
self.log_commit_stats = False
self._logger = logger
+ self._encryption_config = encryption_config
if pool is None:
pool = BurstyPool()
@@ -138,7 +159,7 @@ def from_pb(cls, database_pb, instance, pool=None):
"""Creates an instance of this class from a protobuf.
:type database_pb:
- :class:`~google.cloud.spanner_admin_instance_v1.Instance`
+ :class:`~google.cloud.spanner_admin_database_v1.types.Database`
:param database_pb: A database protobuf object.
:type instance: :class:`~google.cloud.spanner_v1.instance.Instance`
@@ -199,7 +220,7 @@ def name(self):
def state(self):
"""State of this database.
- :rtype: :class:`~google.cloud.spanner_admin_database_v1.Database.State`
+ :rtype: :class:`~google.cloud.spanner_admin_database_v1.types.Database.State`
:returns: an enum describing the state of the database
"""
return self._state
@@ -218,7 +239,7 @@ def create_time(self):
def restore_info(self):
"""Restore info for this database.
- :rtype: :class:`~google.cloud.spanner_v1.database.RestoreInfo`
+ :rtype: :class:`~google.cloud.spanner_admin_database_v1.types.RestoreInfo`
:returns: an object representing the restore info for this database
"""
return self._restore_info
@@ -242,6 +263,22 @@ def earliest_version_time(self):
"""
return self._earliest_version_time
+ @property
+ def encryption_config(self):
+ """Encryption config for this database.
+ :rtype: :class:`~google.cloud.spanner_admin_database_v1.types.EncryptionConfig`
+ :returns: an object representing the encryption config for this database
+ """
+ return self._encryption_config
+
+ @property
+ def encryption_info(self):
+ """Encryption info for this database.
+ :rtype: a list of :class:`~google.cloud.spanner_admin_database_v1.types.EncryptionInfo`
+ :returns: a list of objects representing encryption info for this database
+ """
+ return self._encryption_info
+
@property
def ddl_statements(self):
"""DDL Statements used to define database schema.
@@ -310,7 +347,7 @@ def __ne__(self, other):
def create(self):
"""Create this database within its instance
- Inclues any configured schema assigned to :attr:`ddl_statements`.
+ Includes any configured schema assigned to :attr:`ddl_statements`.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase
@@ -325,11 +362,14 @@ def create(self):
db_name = self.database_id
if "-" in db_name:
db_name = "`%s`" % (db_name,)
+ if type(self._encryption_config) == dict:
+ self._encryption_config = EncryptionConfig(**self._encryption_config)
request = CreateDatabaseRequest(
parent=self._instance.name,
create_statement="CREATE DATABASE %s" % (db_name,),
extra_statements=list(self._ddl_statements),
+ encryption_config=self._encryption_config,
)
future = api.create_database(request=request, metadata=metadata)
return future
@@ -372,6 +412,8 @@ def reload(self):
self._restore_info = response.restore_info
self._version_retention_period = response.version_retention_period
self._earliest_version_time = response.earliest_version_time
+ self._encryption_config = response.encryption_config
+ self._encryption_info = response.encryption_info
def update_ddl(self, ddl_statements, operation_id=""):
"""Update DDL for this database.
@@ -429,12 +471,12 @@ def execute_partitioned_dml(
required if parameters are passed.
:type query_options:
- :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryOptions`
+ :class:`~google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryOptions`
or :class:`dict`
:param query_options:
(Optional) Query optimizer configuration to use for the given query.
If a dict is provided, it must be of the same form as the protobuf
- message :class:`~google.cloud.spanner_v1.QueryOptions`
+ message :class:`~google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryOptions`
:rtype: int
:returns: Count of rows affected by the DML statement.
@@ -568,7 +610,7 @@ def run_in_transaction(self, func, *args, **kw):
:returns: The return value of ``func``.
:raises Exception:
- reraises any non-ABORT execptions raised by ``func``.
+ reraises any non-ABORT exceptions raised by ``func``.
"""
# Sanity check: Is there a transaction already running?
# If there is, then raise a red flag. Otherwise, mark that this one
@@ -588,8 +630,8 @@ def run_in_transaction(self, func, *args, **kw):
def restore(self, source):
"""Restore from a backup to this database.
- :type backup: :class:`~google.cloud.spanner_v1.backup.Backup`
- :param backup: the path of the backup being restored from.
+ :type source: :class:`~google.cloud.spanner_v1.backup.Backup`
+ :param source: the backup to restore from.
:rtype: :class:`~google.api_core.operation.Operation`
:returns: a future used to poll the status of the create request
@@ -601,14 +643,26 @@ def restore(self, source):
"""
if source is None:
raise ValueError("Restore source not specified")
+ if type(self._encryption_config) == dict:
+ self._encryption_config = RestoreDatabaseEncryptionConfig(
+ **self._encryption_config
+ )
+ if (
+ self.encryption_config
+ and self.encryption_config.kms_key_name
+ and self.encryption_config.encryption_type
+ != RestoreDatabaseEncryptionConfig.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION
+ ):
+ raise ValueError("kms_key_name only used with CUSTOMER_MANAGED_ENCRYPTION")
api = self._instance._client.database_admin_api
metadata = _metadata_with_prefix(self.name)
- future = api.restore_database(
+ request = RestoreDatabaseRequest(
parent=self._instance.name,
database_id=self.database_id,
backup=source.name,
- metadata=metadata,
+ encryption_config=self._encryption_config,
)
+ future = api.restore_database(request=request, metadata=metadata,)
return future
def is_ready(self):
@@ -862,6 +916,9 @@ def generate_read_batches(
index="",
partition_size_bytes=None,
max_partitions=None,
+ *,
+ retry=gapic_v1.method.DEFAULT,
+ timeout=gapic_v1.method.DEFAULT,
):
"""Start a partitioned batch read operation.
@@ -893,9 +950,15 @@ def generate_read_batches(
service uses this as a hint, the actual number of partitions may
differ.
+ :type retry: :class:`~google.api_core.retry.Retry`
+ :param retry: (Optional) The retry settings for this request.
+
+ :type timeout: float
+ :param timeout: (Optional) The timeout for this request.
+
:rtype: iterable of dict
:returns:
- mappings of information used peform actual partitioned reads via
+ mappings of information used to perform actual partitioned reads via
:meth:`process_read_batch`.
"""
partitions = self._get_snapshot().partition_read(
@@ -905,6 +968,8 @@ def generate_read_batches(
index=index,
partition_size_bytes=partition_size_bytes,
max_partitions=max_partitions,
+ retry=retry,
+ timeout=timeout,
)
read_info = {
@@ -916,7 +981,9 @@ def generate_read_batches(
for partition in partitions:
yield {"partition": partition, "read": read_info.copy()}
- def process_read_batch(self, batch):
+ def process_read_batch(
+ self, batch, *, retry=gapic_v1.method.DEFAULT, timeout=gapic_v1.method.DEFAULT,
+ ):
"""Process a single, partitioned read.
:type batch: mapping
@@ -924,13 +991,22 @@ def process_read_batch(self, batch):
one of the mappings returned from an earlier call to
:meth:`generate_read_batches`.
+ :type retry: :class:`~google.api_core.retry.Retry`
+ :param retry: (Optional) The retry settings for this request.
+
+ :type timeout: float
+ :param timeout: (Optional) The timeout for this request.
+
:rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet`
:returns: a result set instance which can be used to consume rows.
"""
kwargs = copy.deepcopy(batch["read"])
keyset_dict = kwargs.pop("keyset")
kwargs["keyset"] = KeySet._from_dict(keyset_dict)
- return self._get_snapshot().read(partition=batch["partition"], **kwargs)
+ return self._get_snapshot().read(
+ partition=batch["partition"], **kwargs, retry=retry, timeout=timeout
+ )
def generate_query_batches(
self,
@@ -940,12 +1016,15 @@ def generate_query_batches(
partition_size_bytes=None,
max_partitions=None,
query_options=None,
+ *,
+ retry=gapic_v1.method.DEFAULT,
+ timeout=gapic_v1.method.DEFAULT,
):
"""Start a partitioned query operation.
Uses the ``PartitionQuery`` API request to start a partitioned
query operation. Returns a list of batch information needed to
- peform the actual queries.
+ perform the actual queries.
:type sql: str
:param sql: SQL query statement
@@ -976,16 +1055,22 @@ def generate_query_batches(
differ.
:type query_options:
- :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryOptions`
+ :class:`~google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryOptions`
or :class:`dict`
:param query_options:
(Optional) Query optimizer configuration to use for the given query.
If a dict is provided, it must be of the same form as the protobuf
- message :class:`~google.cloud.spanner_v1.QueryOptions`
+ message :class:`~google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryOptions`
+
+ :type retry: :class:`~google.api_core.retry.Retry`
+ :param retry: (Optional) The retry settings for this request.
+
+ :type timeout: float
+ :param timeout: (Optional) The timeout for this request.
:rtype: iterable of dict
:returns:
- mappings of information used peform actual partitioned reads via
+ mappings of information used to perform actual partitioned reads via
:meth:`process_read_batch`.
"""
partitions = self._get_snapshot().partition_query(
@@ -994,6 +1079,8 @@ def generate_query_batches(
param_types=param_types,
partition_size_bytes=partition_size_bytes,
max_partitions=max_partitions,
+ retry=retry,
+ timeout=timeout,
)
query_info = {"sql": sql}
@@ -1011,7 +1098,9 @@ def generate_query_batches(
for partition in partitions:
yield {"partition": partition, "query": query_info}
- def process_query_batch(self, batch):
+ def process_query_batch(
+ self, batch, *, retry=gapic_v1.method.DEFAULT, timeout=gapic_v1.method.DEFAULT,
+ ):
"""Process a single, partitioned query.
:type batch: mapping
@@ -1019,11 +1108,17 @@ def process_query_batch(self, batch):
one of the mappings returned from an earlier call to
:meth:`generate_query_batches`.
+ :type retry: :class:`~google.api_core.retry.Retry`
+ :param retry: (Optional) The retry settings for this request.
+
+ :type timeout: float
+ :param timeout: (Optional) The timeout for this request.
+
:rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet`
:returns: a result set instance which can be used to consume rows.
"""
return self._get_snapshot().execute_sql(
- partition=batch["partition"], **batch["query"]
+ partition=batch["partition"], **batch["query"], retry=retry, timeout=timeout
)
def process(self, batch):
@@ -1065,7 +1160,7 @@ def _check_ddl_statements(value):
https://cloud.google.com/spanner/docs/data-definition-language
:type value: list of string
- :param value: DDL statements, excluding the 'CREATE DATABSE' statement
+ :param value: DDL statements, excluding the 'CREATE DATABASE' statement
:rtype: tuple
:returns: tuple of validated DDL statement strings.
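
Taken together, the `database.py` changes above enable the CMEK flows. A hedged sketch of creating and restoring with the new ``encryption_config`` parameter (all IDs and the KMS key path below are placeholders, not values from this change):

```python
# Hedged sketch of the new encryption_config flows; every ID and the KMS
# key path below is a placeholder.
from google.cloud import spanner
from google.cloud.spanner_admin_database_v1 import (
    EncryptionConfig,
    RestoreDatabaseEncryptionConfig,
)

client = spanner.Client()
instance = client.instance("my-instance")

# Create a database protected by a customer-managed key.
database = instance.database(
    "my-database",
    encryption_config=EncryptionConfig(
        kms_key_name="projects/p/locations/l/keyRings/r/cryptoKeys/k"
    ),
)
database.create().result()

# Restore with an explicit encryption type; kms_key_name is only accepted
# together with CUSTOMER_MANAGED_ENCRYPTION, as Database.restore() enforces.
restored = instance.database(
    "restored-db",
    encryption_config=RestoreDatabaseEncryptionConfig(
        encryption_type=RestoreDatabaseEncryptionConfig.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION
    ),
)
# `backup` is assumed to be an existing Backup object.
operation = restored.restore(source=backup)
```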
diff --git a/google/cloud/spanner_v1/instance.py b/google/cloud/spanner_v1/instance.py
index db729d9527..5a9cf95f5a 100644
--- a/google/cloud/spanner_v1/instance.py
+++ b/google/cloud/spanner_v1/instance.py
@@ -349,7 +349,7 @@ def delete(self):
Soon afterward:
- * The instance and all databases within the instance will be deleteed.
+ * The instance and all databases within the instance will be deleted.
All data in the databases will be permanently deleted.
"""
api = self._client.instance_admin_api
@@ -357,7 +357,14 @@ def delete(self):
api.delete_instance(name=self.name, metadata=metadata)
- def database(self, database_id, ddl_statements=(), pool=None, logger=None):
+ def database(
+ self,
+ database_id,
+ ddl_statements=(),
+ pool=None,
+ logger=None,
+ encryption_config=None,
+ ):
"""Factory to create a database within this instance.
:type database_id: str
@@ -365,23 +372,38 @@ def database(self, database_id, ddl_statements=(), pool=None, logger=None):
:type ddl_statements: list of string
:param ddl_statements: (Optional) DDL statements, excluding the
- 'CREATE DATABSE' statement.
+ 'CREATE DATABASE' statement.
:type pool: concrete subclass of
:class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`.
:param pool: (Optional) session pool to be used by database.
- :type logger: `logging.Logger`
+ :type logger: :class:`logging.Logger`
:param logger: (Optional) a custom logger that is used if `log_commit_stats`
is `True` to log commit statistics. If not passed, a logger
will be created when needed that will log the commit statistics
to stdout.
+ :type encryption_config:
+ :class:`~google.cloud.spanner_admin_database_v1.types.EncryptionConfig`
+ or :class:`~google.cloud.spanner_admin_database_v1.types.RestoreDatabaseEncryptionConfig`
+ or :class:`dict`
+ :param encryption_config:
+ (Optional) Encryption configuration for the database.
+ If a dict is provided, it must be of the same form as either of the protobuf
+ messages :class:`~google.cloud.spanner_admin_database_v1.types.EncryptionConfig`
+ or :class:`~google.cloud.spanner_admin_database_v1.types.RestoreDatabaseEncryptionConfig`
+
:rtype: :class:`~google.cloud.spanner_v1.database.Database`
:returns: a database owned by this instance.
"""
return Database(
- database_id, self, ddl_statements=ddl_statements, pool=pool, logger=logger
+ database_id,
+ self,
+ ddl_statements=ddl_statements,
+ pool=pool,
+ logger=logger,
+ encryption_config=encryption_config,
)
def list_databases(self, page_size=None):
@@ -398,7 +420,7 @@ def list_databases(self, page_size=None):
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns:
- Iterator of :class:`~google.cloud.spanner_v1.database.Database`
+ Iterator of :class:`~google.cloud.spanner_admin_database_v1.types.Database`
resources within the current instance.
"""
metadata = _metadata_with_prefix(self.name)
@@ -408,7 +430,14 @@ def list_databases(self, page_size=None):
)
return page_iter
- def backup(self, backup_id, database="", expire_time=None, version_time=None):
+ def backup(
+ self,
+ backup_id,
+ database="",
+ expire_time=None,
+ version_time=None,
+ encryption_config=None,
+ ):
"""Factory to create a backup within this instance.
:type backup_id: str
@@ -429,6 +458,17 @@ def backup(self, backup_id, database="", expire_time=None, version_time=None):
Optional. The version time that will be used to create the externally
consistent copy of the database. If not present, it is the same as
the `create_time` of the backup.
+
+ :type encryption_config:
+ :class:`~google.cloud.spanner_admin_database_v1.types.CreateBackupEncryptionConfig`
+ or :class:`dict`
+ :param encryption_config:
+ (Optional) Encryption configuration for the backup.
+ If a dict is provided, it must be of the same form as the protobuf
+ message :class:`~google.cloud.spanner_admin_database_v1.types.CreateBackupEncryptionConfig`
+
+ :rtype: :class:`~google.cloud.spanner_v1.backup.Backup`
+ :returns: a backup owned by this instance.
"""
try:
return Backup(
@@ -437,6 +477,7 @@ def backup(self, backup_id, database="", expire_time=None, version_time=None):
database=database.name,
expire_time=expire_time,
version_time=version_time,
+ encryption_config=encryption_config,
)
except AttributeError:
return Backup(
@@ -445,6 +486,7 @@ def backup(self, backup_id, database="", expire_time=None, version_time=None):
database=database,
expire_time=expire_time,
version_time=version_time,
+ encryption_config=encryption_config,
)
def list_backups(self, filter_="", page_size=None):
@@ -462,7 +504,7 @@ def list_backups(self, filter_="", page_size=None):
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns:
- Iterator of :class:`~google.cloud.spanner_v1.backup.Backup`
+ Iterator of :class:`~google.cloud.spanner_admin_database_v1.types.Backup`
resources within the current instance.
"""
metadata = _metadata_with_prefix(self.name)
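
Similarly, a hedged sketch of the widened ``Instance.backup()`` factory, continuing the placeholder names from the previous sketch:

```python
# Sketch only; `instance` and `database` are the placeholders from above.
from datetime import datetime, timedelta, timezone
from google.cloud.spanner_admin_database_v1 import CreateBackupEncryptionConfig

backup = instance.backup(
    "my-backup",
    database=database,
    expire_time=datetime.now(timezone.utc) + timedelta(days=7),
    encryption_config=CreateBackupEncryptionConfig(
        encryption_type=CreateBackupEncryptionConfig.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION
    ),
)
backup.create().result()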
diff --git a/google/cloud/spanner_v1/keyset.py b/google/cloud/spanner_v1/keyset.py
index 269bb12f05..ab712219f0 100644
--- a/google/cloud/spanner_v1/keyset.py
+++ b/google/cloud/spanner_v1/keyset.py
@@ -68,7 +68,7 @@ def __init__(
def _to_pb(self):
"""Construct a KeyRange protobuf.
- :rtype: :class:`~google.cloud.spanner_v1.KeyRange`
+ :rtype: :class:`~google.cloud.spanner_v1.types.KeyRange`
:returns: protobuf corresponding to this instance.
"""
kwargs = {}
@@ -88,7 +88,7 @@ def _to_pb(self):
return KeyRangePB(**kwargs)
def _to_dict(self):
- """Return keyrange's state as a dict.
+ """Return the state of the keyrange as a dict.
:rtype: dict
:returns: state of this instance.
@@ -139,7 +139,7 @@ def __init__(self, keys=(), ranges=(), all_=False):
def _to_pb(self):
"""Construct a KeySet protobuf.
- :rtype: :class:`~google.cloud.spanner_v1.KeySet`
+ :rtype: :class:`~google.cloud.spanner_v1.types.KeySet`
:returns: protobuf corresponding to this instance.
"""
if self.all_:
@@ -155,7 +155,7 @@ def _to_pb(self):
return KeySetPB(**kwargs)
def _to_dict(self):
- """Return keyset's state as a dict.
+ """Return the state of the keyset as a dict.
The result can be used to serialize the instance and reconstitute
it later using :meth:`_from_dict`.
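
For reference, the ``KeySet``/``KeyRange`` wrappers whose serialization helpers are touched above are constructed like this (the keys are illustrative):

```python
# Illustrative keys; _to_pb()/_to_dict() are called internally by reads
# and by the batching helpers.
from google.cloud.spanner_v1.keyset import KeyRange, KeySet

keyset = KeySet(
    keys=[["alice"], ["bob"]],
    ranges=[KeyRange(start_closed=["a"], end_open=["b"])],
)
```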
diff --git a/google/cloud/spanner_v1/param_types.py b/google/cloud/spanner_v1/param_types.py
index 8ec5ac7ace..c5a106d0aa 100644
--- a/google/cloud/spanner_v1/param_types.py
+++ b/google/cloud/spanner_v1/param_types.py
@@ -33,10 +33,10 @@
def Array(element_type): # pylint: disable=invalid-name
"""Construct an array parameter type description protobuf.
- :type element_type: :class:`~google.cloud.spanner_v1.Type`
+ :type element_type: :class:`~google.cloud.spanner_v1.types.Type`
:param element_type: the type of elements of the array
- :rtype: :class:`google.cloud.spanner_v1.Type`
+ :rtype: :class:`google.cloud.spanner_v1.types.Type`
:returns: the appropriate array-type protobuf
"""
return Type(code=TypeCode.ARRAY, array_element_type=element_type)
@@ -48,10 +48,10 @@ def StructField(name, field_type): # pylint: disable=invalid-name
:type name: str
:param name: the name of the field
- :type field_type: :class:`google.cloud.spanner_v1.Type`
+ :type field_type: :class:`google.cloud.spanner_v1.types.Type`
:param field_type: the type of the field
- :rtype: :class:`google.cloud.spanner_v1.StructType.Field`
+ :rtype: :class:`google.cloud.spanner_v1.types.StructType.Field`
:returns: the appropriate struct-field-type protobuf
"""
return StructType.Field(name=name, type_=field_type)
@@ -60,7 +60,7 @@ def StructField(name, field_type): # pylint: disable=invalid-name
def Struct(fields): # pylint: disable=invalid-name
"""Construct a struct parameter type description protobuf.
- :type fields: list of :class:`google.cloud.spanner_v1.StructType.Field`
+ :type fields: list of :class:`google.cloud.spanner_v1.types.StructType.Field`
:param fields: the fields of the struct
:rtype: :class:`~google.cloud.spanner_v1.types.Type`
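
A short sketch of the helpers documented above (field names are illustrative):

```python
# Illustrative field names only.
from google.cloud.spanner_v1 import param_types

pet_names = param_types.Array(param_types.STRING)
address = param_types.Struct(
    [
        param_types.StructField("city", param_types.STRING),
        param_types.StructField("zip_code", param_types.INT64),
    ]
)
```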
diff --git a/google/cloud/spanner_v1/pool.py b/google/cloud/spanner_v1/pool.py
index 112c277c86..4e20a42c4c 100644
--- a/google/cloud/spanner_v1/pool.py
+++ b/google/cloud/spanner_v1/pool.py
@@ -42,7 +42,7 @@ def __init__(self, labels=None):
@property
def labels(self):
- """User-assigned labels for sesions created by the pool.
+ """User-assigned labels for sessions created by the pool.
:rtype: dict (str -> str)
:returns: labels assigned by the user
@@ -53,7 +53,7 @@ def bind(self, database):
"""Associate the pool with a database.
:type database: :class:`~google.cloud.spanner_v1.database.Database`
- :param database: database used by the pool: used to create sessions
+ :param database: database used by the pool to create sessions
when needed.
Concrete implementations of this method may pre-fill the pool
@@ -162,7 +162,7 @@ def bind(self, database):
"""Associate the pool with a database.
:type database: :class:`~google.cloud.spanner_v1.database.Database`
- :param database: database used by the pool: used to create sessions
+ :param database: database used by the pool to create sessions
when needed.
"""
self._database = database
@@ -256,7 +256,7 @@ def bind(self, database):
"""Associate the pool with a database.
:type database: :class:`~google.cloud.spanner_v1.database.Database`
- :param database: database used by the pool: used to create sessions
+ :param database: database used by the pool to create sessions
when needed.
"""
self._database = database
@@ -354,7 +354,7 @@ def bind(self, database):
"""Associate the pool with a database.
:type database: :class:`~google.cloud.spanner_v1.database.Database`
- :param database: database used by the pool: used to create sessions
+ :param database: database used by the pool to create sessions
when needed.
"""
self._database = database
@@ -486,7 +486,7 @@ def bind(self, database):
"""Associate the pool with a database.
:type database: :class:`~google.cloud.spanner_v1.database.Database`
- :param database: database used by the pool: used to create sessions
+ :param database: database used by the pool to create sessions
when needed.
"""
super(TransactionPingingPool, self).bind(database)
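
The ``bind()`` docstrings above describe the pool/database handshake; binding normally happens inside the ``Database`` constructor, roughly as in this sketch (pool size and IDs are illustrative):

```python
# Sketch: Database.__init__ calls pool.bind(database) for you.
from google.cloud.spanner_v1.pool import FixedSizePool

pool = FixedSizePool(size=10, default_timeout=5)
database = instance.database("my-database", pool=pool)  # binds the pool
```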
diff --git a/google/cloud/spanner_v1/services/spanner/client.py b/google/cloud/spanner_v1/services/spanner/client.py
index 691543a984..387be03369 100644
--- a/google/cloud/spanner_v1/services/spanner/client.py
+++ b/google/cloud/spanner_v1/services/spanner/client.py
@@ -1327,12 +1327,11 @@ def commit(
request.session = session
if transaction_id is not None:
request.transaction_id = transaction_id
+ if mutations is not None:
+ request.mutations = mutations
if single_use_transaction is not None:
request.single_use_transaction = single_use_transaction
- if mutations:
- request.mutations.extend(mutations)
-
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.commit]
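
The switch from ``extend`` to plain assignment leans on proto-plus marshalling of repeated message fields; a small illustration (the mutation is a placeholder):

```python
# Illustration: proto-plus lets a Python list be assigned directly to a
# repeated field, copying the entries into the request.
from google.cloud.spanner_v1 import CommitRequest, Mutation

request = CommitRequest()
request.mutations = [Mutation(delete=Mutation.Delete(table="Singers"))]
```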
diff --git a/google/cloud/spanner_v1/services/spanner/pagers.py b/google/cloud/spanner_v1/services/spanner/pagers.py
index e98fda11c7..e33003b4f5 100644
--- a/google/cloud/spanner_v1/services/spanner/pagers.py
+++ b/google/cloud/spanner_v1/services/spanner/pagers.py
@@ -15,7 +15,16 @@
# limitations under the License.
#
-from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple
+from typing import (
+ Any,
+ AsyncIterable,
+ Awaitable,
+ Callable,
+ Iterable,
+ Sequence,
+ Tuple,
+ Optional,
+)
from google.cloud.spanner_v1.types import spanner
diff --git a/google/cloud/spanner_v1/session.py b/google/cloud/spanner_v1/session.py
index 4bec436d7d..1321308ace 100644
--- a/google/cloud/spanner_v1/session.py
+++ b/google/cloud/spanner_v1/session.py
@@ -243,21 +243,27 @@ def execute_sql(
the names used in ``sql``.
:type param_types:
- dict, {str -> :class:`~google.spanner.v1.type_pb2.TypeCode`}
+ dict, {str -> :class:`~google.cloud.spanner_v1.types.TypeCode`}
:param param_types: (Optional) explicit types for one or more param
values; overrides default type detection on the
back-end.
:type query_mode:
- :class:`~google.spanner.v1.spanner_pb2.ExecuteSqlRequest.QueryMode`
+ :class:`~google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryMode`
:param query_mode: Mode governing return of results / query plan. See:
`QueryMode <https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.ExecuteSqlRequest.QueryMode>`_.
:type query_options:
- :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryOptions`
+ :class:`~google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryOptions`
or :class:`dict`
:param query_options: (Optional) Options that are provided for query plan stability.
+ :type retry: :class:`~google.api_core.retry.Retry`
+ :param retry: (Optional) The retry settings for this request.
+
+ :type timeout: float
+ :param timeout: (Optional) The timeout for this request.
+
:rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet`
:returns: a result set instance which can be used to consume rows.
"""
@@ -321,7 +327,7 @@ def run_in_transaction(self, func, *args, **kw):
:returns: The return value of ``func``.
:raises Exception:
- reraises any non-ABORT execptions raised by ``func``.
+ reraises any non-ABORT exceptions raised by ``func``.
"""
deadline = time.time() + kw.pop("timeout_secs", DEFAULT_RETRY_TIMEOUT_SECS)
attempts = 0
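
A brief sketch of the retry loop described above; ``timeout_secs`` caps the overall time spent retrying aborted transactions (the unit-of-work function is made up):

```python
# Sketch: the unit-of-work function and the 30s budget are illustrative.
def insert_singer(transaction):
    transaction.execute_update(
        "INSERT INTO Singers (SingerId, FirstName) VALUES (1, 'Marc')"
    )

database.run_in_transaction(insert_singer, timeout_secs=30)
```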
diff --git a/google/cloud/spanner_v1/snapshot.py b/google/cloud/spanner_v1/snapshot.py
index d417bfd1f1..1b3ae8097d 100644
--- a/google/cloud/spanner_v1/snapshot.py
+++ b/google/cloud/spanner_v1/snapshot.py
@@ -27,7 +27,7 @@
from google.api_core.exceptions import InternalServerError
from google.api_core.exceptions import ServiceUnavailable
-import google.api_core.gapic_v1.method
+from google.api_core import gapic_v1
from google.cloud.spanner_v1._helpers import _make_value_pb
from google.cloud.spanner_v1._helpers import _merge_query_options
from google.cloud.spanner_v1._helpers import _metadata_with_prefix
@@ -109,7 +109,18 @@ def _make_txn_selector(self): # pylint: disable=redundant-returns-doc
"""
raise NotImplementedError
- def read(self, table, columns, keyset, index="", limit=0, partition=None):
+ def read(
+ self,
+ table,
+ columns,
+ keyset,
+ index="",
+ limit=0,
+ partition=None,
+ *,
+ retry=gapic_v1.method.DEFAULT,
+ timeout=gapic_v1.method.DEFAULT,
+ ):
"""Perform a ``StreamingRead`` API request for rows in a table.
:type table: str
@@ -134,6 +145,12 @@ def read(self, table, columns, keyset, index="", limit=0, partition=None):
from :meth:`partition_read`. Incompatible with
``limit``.
+ :type retry: :class:`~google.api_core.retry.Retry`
+ :param retry: (Optional) The retry settings for this request.
+
+ :type timeout: float
+ :param timeout: (Optional) The timeout for this request.
+
:rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet`
:returns: a result set instance which can be used to consume rows.
@@ -163,7 +180,11 @@ def read(self, table, columns, keyset, index="", limit=0, partition=None):
partition_token=partition,
)
restart = functools.partial(
- api.streaming_read, request=request, metadata=metadata,
+ api.streaming_read,
+ request=request,
+ metadata=metadata,
+ retry=retry,
+ timeout=timeout,
)
trace_attributes = {"table_id": table, "columns": columns}
@@ -186,8 +207,8 @@ def execute_sql(
query_mode=None,
query_options=None,
partition=None,
- retry=google.api_core.gapic_v1.method.DEFAULT,
- timeout=google.api_core.gapic_v1.method.DEFAULT,
+ retry=gapic_v1.method.DEFAULT,
+ timeout=gapic_v1.method.DEFAULT,
):
"""Perform an ``ExecuteStreamingSql`` API request.
@@ -204,18 +225,18 @@ def execute_sql(
required if parameters are passed.
:type query_mode:
- :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryMode`
+ :class:`~google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryMode`
:param query_mode: Mode governing return of results / query plan.
See:
`QueryMode <https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.ExecuteSqlRequest.QueryMode>`_.
:type query_options:
- :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryOptions`
+ :class:`~google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryOptions`
or :class:`dict`
:param query_options:
(Optional) Query optimizer configuration to use for the given query.
If a dict is provided, it must be of the same form as the protobuf
- message :class:`~google.cloud.spanner_v1.QueryOptions`
+ message :class:`~google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryOptions`
:type partition: bytes
:param partition: (Optional) one of the partition tokens returned
@@ -224,6 +245,12 @@ def execute_sql(
:rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet`
:returns: a result set instance which can be used to consume rows.
+ :type retry: :class:`~google.api_core.retry.Retry`
+ :param retry: (Optional) The retry settings for this request.
+
+ :type timeout: float
+ :param timeout: (Optional) The timeout for this request.
+
:raises ValueError:
for reuse of single-use snapshots, or if a transaction ID is
already pending for multiple-use snapshots.
@@ -296,8 +323,11 @@ def partition_read(
index="",
partition_size_bytes=None,
max_partitions=None,
+ *,
+ retry=gapic_v1.method.DEFAULT,
+ timeout=gapic_v1.method.DEFAULT,
):
- """Perform a ``ParitionRead`` API request for rows in a table.
+ """Perform a ``PartitionRead`` API request for rows in a table.
:type table: str
:param table: name of the table from which to fetch data
@@ -323,12 +353,18 @@ def partition_read(
service uses this as a hint, the actual number of partitions may
differ.
+ :type retry: :class:`~google.api_core.retry.Retry`
+ :param retry: (Optional) The retry settings for this request.
+
+ :type timeout: float
+ :param timeout: (Optional) The timeout for this request.
+
:rtype: iterable of bytes
:returns: a sequence of partition tokens
:raises ValueError:
for single-use snapshots, or if a transaction ID is
- already associtated with the snapshot.
+ already associated with the snapshot.
"""
if not self._multi_use:
raise ValueError("Cannot use single-use snapshot.")
@@ -357,7 +393,9 @@ def partition_read(
with trace_call(
"CloudSpanner.PartitionReadOnlyTransaction", self._session, trace_attributes
):
- response = api.partition_read(request=request, metadata=metadata,)
+ response = api.partition_read(
+ request=request, metadata=metadata, retry=retry, timeout=timeout,
+ )
return [partition.partition_token for partition in response.partitions]
@@ -368,8 +406,11 @@ def partition_query(
param_types=None,
partition_size_bytes=None,
max_partitions=None,
+ *,
+ retry=gapic_v1.method.DEFAULT,
+ timeout=gapic_v1.method.DEFAULT,
):
- """Perform a ``ParitionQuery`` API request.
+ """Perform a ``PartitionQuery`` API request.
:type sql: str
:param sql: SQL query statement
@@ -394,12 +435,18 @@ def partition_query(
service uses this as a hint, the actual number of partitions may
differ.
+ :type retry: :class:`~google.api_core.retry.Retry`
+ :param retry: (Optional) The retry settings for this request.
+
+ :type timeout: float
+ :param timeout: (Optional) The timeout for this request.
+
:rtype: iterable of bytes
:returns: a sequence of partition tokens
:raises ValueError:
for single-use snapshots, or if a transaction ID is
- already associtated with the snapshot.
+ already associated with the snapshot.
"""
if not self._multi_use:
raise ValueError("Cannot use single-use snapshot.")
@@ -438,7 +485,9 @@ def partition_query(
self._session,
trace_attributes,
):
- response = api.partition_query(request=request, metadata=metadata,)
+ response = api.partition_query(
+ request=request, metadata=metadata, retry=retry, timeout=timeout,
+ )
return [partition.partition_token for partition in response.partitions]
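
A hedged sketch of the new keyword-only ``retry``/``timeout`` arguments threaded through `snapshot.py` above (table, columns, and the retry settings are illustrative):

```python
# Sketch only: table/columns and the Retry settings are illustrative.
from google.api_core.retry import Retry
from google.cloud.spanner_v1.keyset import KeySet

with database.snapshot() as snapshot:
    rows = snapshot.read(
        table="Singers",
        columns=("SingerId", "FirstName"),
        keyset=KeySet(all_=True),
        retry=Retry(deadline=60),
        timeout=30.0,
    )
    for row in rows:
        print(row)
```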
diff --git a/google/cloud/spanner_v1/streamed.py b/google/cloud/spanner_v1/streamed.py
index 88677f668b..fbcca77795 100644
--- a/google/cloud/spanner_v1/streamed.py
+++ b/google/cloud/spanner_v1/streamed.py
@@ -33,7 +33,7 @@ class StreamedResultSet(object):
:type response_iterator:
:param response_iterator:
Iterator yielding
- :class:`~google.cloud.spanner_v1.PartialResultSet`
+ :class:`~google.cloud.spanner_v1.types.PartialResultSet`
instances.
:type source: :class:`~google.cloud.spanner_v1.snapshot.Snapshot`
@@ -53,7 +53,7 @@ def __init__(self, response_iterator, source=None):
def fields(self):
"""Field descriptors for result set columns.
- :rtype: list of :class:`~google.cloud.spanner_v1.StructType.Field`
+ :rtype: list of :class:`~google.cloud.spanner_v1.types.StructType.Field`
:returns: list of fields describing column names / types.
"""
return self._metadata.row_type.fields
@@ -62,7 +62,7 @@ def fields(self):
def metadata(self):
"""Result set metadata
- :rtype: :class:`~google.cloud.spanner_v1.ResultSetMetadata`
+ :rtype: :class:`~google.cloud.spanner_v1.types.ResultSetMetadata`
:returns: structure describing the results
"""
return self._metadata
@@ -72,7 +72,7 @@ def stats(self):
"""Result set statistics
:rtype:
- :class:`~google.cloud.spanner_v1.ResultSetStats`
+ :class:`~google.cloud.spanner_v1.types.ResultSetStats`
:returns: structure describing status about the response
"""
return self._stats
@@ -201,7 +201,7 @@ class Unmergeable(ValueError):
:type rhs: :class:`~google.protobuf.struct_pb2.Value`
:param rhs: remaining value to be merged
- :type type_: :class:`~google.cloud.spanner_v1.Type`
+ :type type_: :class:`~google.cloud.spanner_v1.types.Type`
:param type_: field type of values being merged
"""
@@ -258,13 +258,17 @@ def _merge_array(lhs, rhs, type_):
lhs.append(first)
else:
last = lhs.pop()
- try:
- merged = _merge_by_type(last, first, element_type)
- except Unmergeable:
+ if last.HasField("null_value"):
lhs.append(last)
lhs.append(first)
else:
- lhs.append(merged)
+ try:
+ merged = _merge_by_type(last, first, element_type)
+ except Unmergeable:
+ lhs.append(last)
+ lhs.append(first)
+ else:
+ lhs.append(merged)
return Value(list_value=ListValue(values=(lhs + rhs)))
@@ -284,13 +288,17 @@ def _merge_struct(lhs, rhs, type_):
lhs.append(first)
else:
last = lhs.pop()
- try:
- merged = _merge_by_type(last, first, candidate_type)
- except Unmergeable:
+ if last.HasField("null_value"):
lhs.append(last)
lhs.append(first)
else:
- lhs.append(merged)
+ try:
+ merged = _merge_by_type(last, first, candidate_type)
+ except Unmergeable:
+ lhs.append(last)
+ lhs.append(first)
+ else:
+ lhs.append(merged)
return Value(list_value=ListValue(values=lhs + rhs))
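
The ``HasField("null_value")`` guard added above encodes an invariant of chunked results: a NULL element always arrives whole, so it must never be merged with the first element of the next chunk. A tiny illustration:

```python
# Illustration of the invariant; the values are made up.
from google.protobuf.struct_pb2 import Value

last = Value(null_value=0)           # trailing element of one chunk
first = Value(string_value="phred")  # leading element of the next chunk

if last.HasField("null_value"):
    merged_tail = [last, first]      # keep both; merging would corrupt NULLs
```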
diff --git a/google/cloud/spanner_v1/transaction.py b/google/cloud/spanner_v1/transaction.py
index aa2353206f..4c99b26a09 100644
--- a/google/cloud/spanner_v1/transaction.py
+++ b/google/cloud/spanner_v1/transaction.py
@@ -29,6 +29,7 @@
from google.cloud.spanner_v1.snapshot import _SnapshotBase
from google.cloud.spanner_v1.batch import _BatchBase
from google.cloud.spanner_v1._opentelemetry_tracing import trace_call
+from google.api_core import gapic_v1
class Transaction(_SnapshotBase, _BatchBase):
@@ -185,7 +186,15 @@ def _make_params_pb(params, param_types):
return {}
def execute_update(
- self, dml, params=None, param_types=None, query_mode=None, query_options=None
+ self,
+ dml,
+ params=None,
+ param_types=None,
+ query_mode=None,
+ query_options=None,
+ *,
+ retry=gapic_v1.method.DEFAULT,
+ timeout=gapic_v1.method.DEFAULT,
):
"""Perform an ``ExecuteSql`` API request with DML.
@@ -202,16 +211,22 @@ def execute_update(
required if parameters are passed.
:type query_mode:
- :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryMode`
+ :class:`~google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryMode`
:param query_mode: Mode governing return of results / query plan.
See:
`QueryMode <https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.ExecuteSqlRequest.QueryMode>`_.
:type query_options:
- :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryOptions`
+ :class:`~google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryOptions`
or :class:`dict`
:param query_options: (Optional) Options that are provided for query plan stability.
+ :type retry: :class:`~google.api_core.retry.Retry`
+ :param retry: (Optional) The retry settings for this request.
+
+ :type timeout: float
+ :param timeout: (Optional) The timeout for this request.
+
:rtype: int
:returns: Count of rows affected by the DML statement.
"""
@@ -245,7 +260,9 @@ def execute_update(
with trace_call(
"CloudSpanner.ReadWriteTransaction", self._session, trace_attributes
):
- response = api.execute_sql(request=request, metadata=metadata)
+ response = api.execute_sql(
+ request=request, metadata=metadata, retry=retry, timeout=timeout
+ )
return response.stats.row_count_exact
def batch_update(self, statements):
@@ -266,7 +283,7 @@ def batch_update(self, statements):
Tuple(status, Sequence[int])
:returns:
Status code, plus counts of rows affected by each completed DML
- statement. Note that if the staus code is not ``OK``, the
+ statement. Note that if the status code is not ``OK``, the
statement triggering the error will not have an entry in the
list, nor will any statements following that one.
"""
diff --git a/google/cloud/spanner_v1/types/transaction.py b/google/cloud/spanner_v1/types/transaction.py
index bcbbddd72c..e20c6ad7b4 100644
--- a/google/cloud/spanner_v1/types/transaction.py
+++ b/google/cloud/spanner_v1/types/transaction.py
@@ -34,7 +34,7 @@ class TransactionOptions(proto.Message):
For more info, see: https://cloud.google.com/spanner/docs/reference/rest/v1/Transaction
Attributes:
- read_write (~.transaction.TransactionOptions.ReadWrite):
+ read_write (google.cloud.spanner_v1.types.TransactionOptions.ReadWrite):
Transaction may write.
Authorization to begin a read-write transaction requires
diff --git a/renovate.json b/renovate.json
index 4fa949311b..f08bc22c9a 100644
--- a/renovate.json
+++ b/renovate.json
@@ -1,5 +1,6 @@
{
"extends": [
"config:base", ":preserveSemverRanges"
- ]
+ ],
+ "ignorePaths": [".pre-commit-config.yaml"]
}
diff --git a/samples/samples/autocommit_test.py b/samples/samples/autocommit_test.py
index a98744968a..c9631516fa 100644
--- a/samples/samples/autocommit_test.py
+++ b/samples/samples/autocommit_test.py
@@ -8,8 +8,6 @@
from google.api_core.exceptions import Aborted
from google.cloud import spanner
-from google.cloud.spanner_dbapi import connect
-import mock
import pytest
from test_utils.retry import RetryErrors
@@ -53,13 +51,13 @@ def database(spanner_instance):
@RetryErrors(exception=Aborted, max_tries=2)
def test_enable_autocommit_mode(capsys, database):
- connection = connect(INSTANCE_ID, DATABASE_ID)
- cursor = connection.cursor()
-
- with mock.patch(
- "google.cloud.spanner_dbapi.connection.Cursor", return_value=cursor,
- ):
- autocommit.enable_autocommit_mode(INSTANCE_ID, DATABASE_ID)
- out, _ = capsys.readouterr()
- assert "Autocommit mode is enabled." in out
- assert "SingerId: 13, AlbumId: Russell, AlbumTitle: Morales" in out
+ # Delete the table if it exists, so retry attempts start from a clean state.
+ table = database.table('Singers')
+ if table.exists():
+ op = database.update_ddl(["DROP TABLE Singers"])
+ op.result()
+
+ autocommit.enable_autocommit_mode(INSTANCE_ID, DATABASE_ID)
+ out, _ = capsys.readouterr()
+ assert "Autocommit mode is enabled." in out
+ assert "SingerId: 13, AlbumId: Russell, AlbumTitle: Morales" in out
diff --git a/samples/samples/noxfile.py b/samples/samples/noxfile.py
index 01686e4a03..97bf7da80e 100644
--- a/samples/samples/noxfile.py
+++ b/samples/samples/noxfile.py
@@ -17,6 +17,7 @@
import os
from pathlib import Path
import sys
+from typing import Callable, Dict, List, Optional
import nox
@@ -39,6 +40,10 @@
# You can opt out from the test for specific Python versions.
'ignored_versions': ["2.7"],
+ # Old samples are opted out of enforcing Python type hints
+ # All new samples should feature them
+ 'enforce_type_hints': False,
+
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
@@ -64,7 +69,7 @@
TEST_CONFIG.update(TEST_CONFIG_OVERRIDE)
-def get_pytest_env_vars():
+def get_pytest_env_vars() -> Dict[str, str]:
"""Returns a dict for pytest invocation."""
ret = {}
@@ -80,7 +85,7 @@ def get_pytest_env_vars():
# DO NOT EDIT - automatically generated.
# All versions used to test samples.
-ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"]
+ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8", "3.9"]
# Any default versions that should be ignored.
IGNORED_VERSIONS = TEST_CONFIG['ignored_versions']
@@ -93,7 +98,7 @@ def get_pytest_env_vars():
#
-def _determine_local_import_names(start_dir):
+def _determine_local_import_names(start_dir: str) -> List[str]:
"""Determines all import names that should be considered "local".
This is used when running the linter to ensure that import order is
@@ -131,8 +136,11 @@ def _determine_local_import_names(start_dir):
@nox.session
-def lint(session):
- session.install("flake8", "flake8-import-order")
+def lint(session: nox.sessions.Session) -> None:
+ if not TEST_CONFIG['enforce_type_hints']:
+ session.install("flake8", "flake8-import-order")
+ else:
+ session.install("flake8", "flake8-import-order", "flake8-annotations")
local_names = _determine_local_import_names(".")
args = FLAKE8_COMMON_ARGS + [
@@ -141,7 +149,17 @@ def lint(session):
"."
]
session.run("flake8", *args)
+#
+# Black
+#
+
+
+@nox.session
+def blacken(session: nox.sessions.Session) -> None:
+ session.install("black")
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+ session.run("black", *python_files)
#
# Sample Tests
@@ -151,7 +169,7 @@ def lint(session):
PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
-def _session_tests(session, post_install=None):
+def _session_tests(session: nox.sessions.Session, post_install: Callable = None) -> None:
"""Runs py.test for a particular project."""
if os.path.exists("requirements.txt"):
session.install("-r", "requirements.txt")
@@ -177,7 +195,7 @@ def _session_tests(session, post_install=None):
@nox.session(python=ALL_VERSIONS)
-def py(session):
+def py(session: nox.sessions.Session) -> None:
"""Runs py.test for a sample using the specified version of Python."""
if session.python in TESTED_VERSIONS:
_session_tests(session)
@@ -192,7 +210,7 @@ def py(session):
#
-def _get_repo_root():
+def _get_repo_root() -> Optional[str]:
""" Returns the root folder of the project. """
# Get root of this repository. Assume we don't have directories nested deeper than 10 items.
p = Path(os.getcwd())
@@ -215,7 +233,7 @@ def _get_repo_root():
@nox.session
@nox.parametrize("path", GENERATED_READMES)
-def readmegen(session, path):
+def readmegen(session: nox.sessions.Session, path: str) -> None:
"""(Re-)generates the readme for a sample."""
session.install("jinja2", "pyyaml")
dir_ = os.path.dirname(path)
diff --git a/samples/samples/requirements.txt b/samples/samples/requirements.txt
index 43919b8c73..6e3d3ae986 100644
--- a/samples/samples/requirements.txt
+++ b/samples/samples/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-spanner==3.1.0
+google-cloud-spanner==3.2.0
futures==3.3.0; python_version < "3"
diff --git a/scripts/fixup_spanner_admin_database_v1_keywords.py b/scripts/fixup_spanner_admin_database_v1_keywords.py
index 96334a9f32..7eb3062dce 100644
--- a/scripts/fixup_spanner_admin_database_v1_keywords.py
+++ b/scripts/fixup_spanner_admin_database_v1_keywords.py
@@ -41,8 +41,8 @@ def partition(
class spanner_admin_databaseCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
- 'create_backup': ('parent', 'backup_id', 'backup', ),
- 'create_database': ('parent', 'create_statement', 'extra_statements', ),
+ 'create_backup': ('parent', 'backup_id', 'backup', 'encryption_config', ),
+ 'create_database': ('parent', 'create_statement', 'extra_statements', 'encryption_config', ),
'delete_backup': ('name', ),
'drop_database': ('database', ),
'get_backup': ('name', ),
@@ -53,7 +53,7 @@ class spanner_admin_databaseCallTransformer(cst.CSTTransformer):
'list_backups': ('parent', 'filter', 'page_size', 'page_token', ),
'list_database_operations': ('parent', 'filter', 'page_size', 'page_token', ),
'list_databases': ('parent', 'page_size', 'page_token', ),
- 'restore_database': ('parent', 'database_id', 'backup', ),
+ 'restore_database': ('parent', 'database_id', 'backup', 'encryption_config', ),
'set_iam_policy': ('resource', 'policy', ),
'test_iam_permissions': ('resource', 'permissions', ),
'update_backup': ('backup', 'update_mask', ),
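
For context, the fixup script rewrites positional GAPIC calls into the request-object form, so the expanded tuples above keep the new ``encryption_config`` argument mapped correctly; a hypothetical before/after:

```python
# Hypothetical call, for illustration only.
# Before the fixup script runs:
#     client.restore_database(parent, database_id, backup, encryption_config)
# After (control params such as retry/timeout/metadata stay as keywords):
#     client.restore_database(
#         request={
#             "parent": parent,
#             "database_id": database_id,
#             "backup": backup,
#             "encryption_config": encryption_config,
#         }
#     )
```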
diff --git a/setup.py b/setup.py
index bf03af8fdb..4c0d844572 100644
--- a/setup.py
+++ b/setup.py
@@ -22,7 +22,7 @@
name = "google-cloud-spanner"
description = "Cloud Spanner API client library"
-version = "3.2.0"
+version = "3.3.0"
# Should be one of:
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
@@ -33,7 +33,7 @@
"google-cloud-core >= 1.4.1, < 2.0dev",
"grpc-google-iam-v1 >= 0.12.3, < 0.13dev",
"libcst >= 0.2.5",
- "proto-plus==1.13.0",
+ "proto-plus >= 1.11.0",
"sqlparse >= 0.3.0",
]
extras = {
diff --git a/synth.metadata b/synth.metadata
index 8e7ae4d697..72c4d0ff71 100644
--- a/synth.metadata
+++ b/synth.metadata
@@ -4,29 +4,29 @@
"git": {
"name": ".",
"remote": "https://github.com/googleapis/python-spanner.git",
- "sha": "be27507c51998e5a4aec54cab57515c4912f5ed5"
+ "sha": "5ca63407847ad615dc51beaaaa7f16640daf0e23"
}
},
{
"git": {
"name": "googleapis",
"remote": "https://github.com/googleapis/googleapis.git",
- "sha": "20712b8fe95001b312f62c6c5f33e3e3ec92cfaf",
- "internalRef": "354996675"
+ "sha": "f829b1334cce86aa3738f3c0698d814b56664445",
+ "internalRef": "358725120"
}
},
{
"git": {
"name": "synthtool",
"remote": "https://github.com/googleapis/synthtool.git",
- "sha": "a783321fd55f010709294455584a553f4b24b944"
+ "sha": "f5c5904fb0c6aa3b3730eadf4e5a4485afc65726"
}
},
{
"git": {
"name": "synthtool",
"remote": "https://github.com/googleapis/synthtool.git",
- "sha": "a783321fd55f010709294455584a553f4b24b944"
+ "sha": "f5c5904fb0c6aa3b3730eadf4e5a4485afc65726"
}
}
],
@@ -66,6 +66,7 @@
".github/ISSUE_TEMPLATE/feature_request.md",
".github/ISSUE_TEMPLATE/support_request.md",
".github/PULL_REQUEST_TEMPLATE.md",
+ ".github/header-checker-lint.yml",
".github/release-please.yml",
".github/snippet-bot.yml",
".gitignore",
@@ -90,19 +91,25 @@
".kokoro/samples/lint/presubmit.cfg",
".kokoro/samples/python3.6/common.cfg",
".kokoro/samples/python3.6/continuous.cfg",
+ ".kokoro/samples/python3.6/periodic-head.cfg",
".kokoro/samples/python3.6/periodic.cfg",
".kokoro/samples/python3.6/presubmit.cfg",
".kokoro/samples/python3.7/common.cfg",
".kokoro/samples/python3.7/continuous.cfg",
+ ".kokoro/samples/python3.7/periodic-head.cfg",
".kokoro/samples/python3.7/periodic.cfg",
".kokoro/samples/python3.7/presubmit.cfg",
".kokoro/samples/python3.8/common.cfg",
".kokoro/samples/python3.8/continuous.cfg",
+ ".kokoro/samples/python3.8/periodic-head.cfg",
".kokoro/samples/python3.8/periodic.cfg",
".kokoro/samples/python3.8/presubmit.cfg",
+ ".kokoro/test-samples-against-head.sh",
+ ".kokoro/test-samples-impl.sh",
".kokoro/test-samples.sh",
".kokoro/trampoline.sh",
".kokoro/trampoline_v2.sh",
+ ".pre-commit-config.yaml",
".trampolinerc",
"CODE_OF_CONDUCT.md",
"CONTRIBUTING.rst",
diff --git a/tests/system/test_system.py b/tests/system/test_system.py
index 575f79746e..7a7630c0d9 100644
--- a/tests/system/test_system.py
+++ b/tests/system/test_system.py
@@ -738,6 +738,12 @@ def test_create_invalid(self):
op.result()
def test_backup_workflow(self):
+ from google.cloud.spanner_admin_database_v1 import (
+ CreateBackupEncryptionConfig,
+ EncryptionConfig,
+ EncryptionInfo,
+ RestoreDatabaseEncryptionConfig,
+ )
from datetime import datetime
from datetime import timedelta
from pytz import UTC
@@ -746,6 +752,9 @@ def test_backup_workflow(self):
backup_id = "backup_id" + unique_resource_id("_")
expire_time = datetime.utcnow() + timedelta(days=3)
expire_time = expire_time.replace(tzinfo=UTC)
+ encryption_config = CreateBackupEncryptionConfig(
+ encryption_type=CreateBackupEncryptionConfig.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION,
+ )
# Create backup.
backup = instance.backup(
@@ -753,6 +762,7 @@ def test_backup_workflow(self):
database=self._db,
expire_time=expire_time,
version_time=self.database_version_time,
+ encryption_config=encryption_config,
)
operation = backup.create()
self.to_delete.append(backup)
@@ -771,6 +781,10 @@ def test_backup_workflow(self):
self.assertEqual(self.database_version_time, backup.version_time)
self.assertIsNotNone(backup.size_bytes)
self.assertIsNotNone(backup.state)
+ self.assertEqual(
+ EncryptionInfo.Type.GOOGLE_DEFAULT_ENCRYPTION,
+ backup.encryption_info.encryption_type,
+ )
# Update with valid argument.
valid_expire_time = datetime.utcnow() + timedelta(days=7)
@@ -780,7 +794,10 @@ def test_backup_workflow(self):
# Restore database to same instance.
restored_id = "restored_db" + unique_resource_id("_")
- database = instance.database(restored_id)
+ encryption_config = RestoreDatabaseEncryptionConfig(
+ encryption_type=RestoreDatabaseEncryptionConfig.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION,
+ )
+ database = instance.database(restored_id, encryption_config=encryption_config)
self.to_drop.append(database)
operation = database.restore(source=backup)
restored_db = operation.result()
@@ -791,6 +808,9 @@ def test_backup_workflow(self):
metadata = operation.metadata
self.assertEqual(self.database_version_time, metadata.backup_info.version_time)
+ database.reload()
+ expected_encryption_config = EncryptionConfig()
+ self.assertEqual(expected_encryption_config, database.encryption_config)
database.drop()
backup.delete()
diff --git a/tests/system/test_system_dbapi.py b/tests/system/test_system_dbapi.py
index 1659fe239b..6ca1029ae1 100644
--- a/tests/system/test_system_dbapi.py
+++ b/tests/system/test_system_dbapi.py
@@ -378,6 +378,54 @@ def test_execute_many(self):
self.assertEqual(res[0], 1)
conn.close()
+ def test_DDL_autocommit(self):
+ """Check that DDLs in autocommit mode are immediately executed."""
+ conn = Connection(Config.INSTANCE, self._db)
+ conn.autocommit = True
+
+ cur = conn.cursor()
+ cur.execute(
+ """
+ CREATE TABLE Singers (
+ SingerId INT64 NOT NULL,
+ Name STRING(1024),
+ ) PRIMARY KEY (SingerId)
+ """
+ )
+ conn.close()
+
+ # if previous DDL wasn't committed, the next DROP TABLE
+ # statement will fail with a ProgrammingError
+ conn = Connection(Config.INSTANCE, self._db)
+ cur = conn.cursor()
+
+ cur.execute("DROP TABLE Singers")
+ conn.commit()
+
+ def test_DDL_commit(self):
+ """Check that DDLs in commit mode are executed on calling `commit()`."""
+ conn = Connection(Config.INSTANCE, self._db)
+ cur = conn.cursor()
+
+ cur.execute(
+ """
+ CREATE TABLE Singers (
+ SingerId INT64 NOT NULL,
+ Name STRING(1024),
+ ) PRIMARY KEY (SingerId)
+ """
+ )
+ conn.commit()
+ conn.close()
+
+ # if previous DDL wasn't committed, the next DROP TABLE
+ # statement will fail with a ProgrammingError
+ conn = Connection(Config.INSTANCE, self._db)
+ cur = conn.cursor()
+
+ cur.execute("DROP TABLE Singers")
+ conn.commit()
+
def clear_table(transaction):
"""Clear the test table."""
diff --git a/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py b/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py
index ebe241df35..86eba5e283 100644
--- a/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py
+++ b/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py
@@ -45,6 +45,7 @@
from google.cloud.spanner_admin_database_v1.services.database_admin import transports
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import common
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore
from google.iam.v1 import options_pb2 as options # type: ignore
@@ -52,8 +53,10 @@
from google.longrunning import operations_pb2
from google.longrunning import operations_pb2 as operations # type: ignore
from google.oauth2 import service_account
+from google.protobuf import any_pb2 as gp_any # type: ignore
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.rpc import status_pb2 as status # type: ignore
from google.type import expr_pb2 as expr # type: ignore
@@ -4950,10 +4953,74 @@ def test_parse_backup_path():
assert expected == actual
-def test_database_path():
+def test_crypto_key_path():
project = "cuttlefish"
- instance = "mussel"
- database = "winkle"
+ location = "mussel"
+ key_ring = "winkle"
+ crypto_key = "nautilus"
+
+ expected = "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format(
+ project=project, location=location, key_ring=key_ring, crypto_key=crypto_key,
+ )
+ actual = DatabaseAdminClient.crypto_key_path(
+ project, location, key_ring, crypto_key
+ )
+ assert expected == actual
+
+
+def test_parse_crypto_key_path():
+ expected = {
+ "project": "scallop",
+ "location": "abalone",
+ "key_ring": "squid",
+ "crypto_key": "clam",
+ }
+ path = DatabaseAdminClient.crypto_key_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = DatabaseAdminClient.parse_crypto_key_path(path)
+ assert expected == actual
+
+
+def test_crypto_key_version_path():
+ project = "whelk"
+ location = "octopus"
+ key_ring = "oyster"
+ crypto_key = "nudibranch"
+ crypto_key_version = "cuttlefish"
+
+ expected = "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}".format(
+ project=project,
+ location=location,
+ key_ring=key_ring,
+ crypto_key=crypto_key,
+ crypto_key_version=crypto_key_version,
+ )
+ actual = DatabaseAdminClient.crypto_key_version_path(
+ project, location, key_ring, crypto_key, crypto_key_version
+ )
+ assert expected == actual
+
+
+def test_parse_crypto_key_version_path():
+ expected = {
+ "project": "mussel",
+ "location": "winkle",
+ "key_ring": "nautilus",
+ "crypto_key": "scallop",
+ "crypto_key_version": "abalone",
+ }
+ path = DatabaseAdminClient.crypto_key_version_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = DatabaseAdminClient.parse_crypto_key_version_path(path)
+ assert expected == actual
+
+
+def test_database_path():
+ project = "squid"
+ instance = "clam"
+ database = "whelk"
expected = "projects/{project}/instances/{instance}/databases/{database}".format(
project=project, instance=instance, database=database,
@@ -4964,9 +5031,9 @@ def test_database_path():
def test_parse_database_path():
expected = {
- "project": "nautilus",
- "instance": "scallop",
- "database": "abalone",
+ "project": "octopus",
+ "instance": "oyster",
+ "database": "nudibranch",
}
path = DatabaseAdminClient.database_path(**expected)
@@ -4976,8 +5043,8 @@ def test_parse_database_path():
def test_instance_path():
- project = "squid"
- instance = "clam"
+ project = "cuttlefish"
+ instance = "mussel"
expected = "projects/{project}/instances/{instance}".format(
project=project, instance=instance,
@@ -4988,8 +5055,8 @@ def test_instance_path():
def test_parse_instance_path():
expected = {
- "project": "whelk",
- "instance": "octopus",
+ "project": "winkle",
+ "instance": "nautilus",
}
path = DatabaseAdminClient.instance_path(**expected)
@@ -4999,7 +5066,7 @@ def test_parse_instance_path():
def test_common_billing_account_path():
- billing_account = "oyster"
+ billing_account = "scallop"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
@@ -5010,7 +5077,7 @@ def test_common_billing_account_path():
def test_parse_common_billing_account_path():
expected = {
- "billing_account": "nudibranch",
+ "billing_account": "abalone",
}
path = DatabaseAdminClient.common_billing_account_path(**expected)
@@ -5020,7 +5087,7 @@ def test_parse_common_billing_account_path():
def test_common_folder_path():
- folder = "cuttlefish"
+ folder = "squid"
expected = "folders/{folder}".format(folder=folder,)
actual = DatabaseAdminClient.common_folder_path(folder)
@@ -5029,7 +5096,7 @@ def test_common_folder_path():
def test_parse_common_folder_path():
expected = {
- "folder": "mussel",
+ "folder": "clam",
}
path = DatabaseAdminClient.common_folder_path(**expected)
@@ -5039,7 +5106,7 @@ def test_parse_common_folder_path():
def test_common_organization_path():
- organization = "winkle"
+ organization = "whelk"
expected = "organizations/{organization}".format(organization=organization,)
actual = DatabaseAdminClient.common_organization_path(organization)
@@ -5048,7 +5115,7 @@ def test_common_organization_path():
def test_parse_common_organization_path():
expected = {
- "organization": "nautilus",
+ "organization": "octopus",
}
path = DatabaseAdminClient.common_organization_path(**expected)
@@ -5058,7 +5125,7 @@ def test_parse_common_organization_path():
def test_common_project_path():
- project = "scallop"
+ project = "oyster"
expected = "projects/{project}".format(project=project,)
actual = DatabaseAdminClient.common_project_path(project)
@@ -5067,7 +5134,7 @@ def test_common_project_path():
def test_parse_common_project_path():
expected = {
- "project": "abalone",
+ "project": "nudibranch",
}
path = DatabaseAdminClient.common_project_path(**expected)
@@ -5077,8 +5144,8 @@ def test_parse_common_project_path():
def test_common_location_path():
- project = "squid"
- location = "clam"
+ project = "cuttlefish"
+ location = "mussel"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
@@ -5089,8 +5156,8 @@ def test_common_location_path():
def test_parse_common_location_path():
expected = {
- "project": "whelk",
- "location": "octopus",
+ "project": "winkle",
+ "location": "nautilus",
}
path = DatabaseAdminClient.common_location_path(**expected)
diff --git a/tests/unit/spanner_dbapi/test_connect.py b/tests/unit/spanner_dbapi/test_connect.py
index 771b9d4a7f..a18781ffd1 100644
--- a/tests/unit/spanner_dbapi/test_connect.py
+++ b/tests/unit/spanner_dbapi/test_connect.py
@@ -139,3 +139,28 @@ def test_sessions_pool(self):
):
connect("test-instance", database_id, pool=pool)
database_mock.assert_called_once_with(database_id, pool=pool)
+
+ def test_connect_w_credential_file_path(self):
+ from google.cloud.spanner_dbapi import connect
+ from google.cloud.spanner_dbapi import Connection
+
+ PROJECT = "test-project"
+ USER_AGENT = "user-agent"
+ credentials = "dummy/file/path.json"
+
+ with mock.patch(
+ "google.cloud.spanner_v1.Client.from_service_account_json"
+ ) as client_mock:
+ connection = connect(
+ "test-instance",
+ "test-database",
+ PROJECT,
+ credentials=credentials,
+ user_agent=USER_AGENT,
+ )
+
+ self.assertIsInstance(connection, Connection)
+
+ client_mock.assert_called_once_with(
+ credentials, project=PROJECT, client_info=mock.ANY
+ )
diff --git a/tests/unit/spanner_dbapi/test_connection.py b/tests/unit/spanner_dbapi/test_connection.py
index f70e7fe669..772ac35032 100644
--- a/tests/unit/spanner_dbapi/test_connection.py
+++ b/tests/unit/spanner_dbapi/test_connection.py
@@ -183,6 +183,10 @@ def test_close(self):
mock_transaction.rollback = mock_rollback = mock.MagicMock()
connection.close()
mock_rollback.assert_called_once_with()
+ connection._transaction = mock.MagicMock()
+ connection._own_pool = False
+ connection.close()
+ self.assertTrue(connection.is_closed)
@mock.patch.object(warnings, "warn")
def test_commit(self, mock_warn):
@@ -379,6 +383,25 @@ def test_run_statement_dont_remember_retried_statements(self):
self.assertEqual(len(connection._statements), 0)
+ def test_run_statement_w_heterogeneous_insert_statements(self):
+ """Check that Connection executes heterogeneous insert statements."""
+ from google.cloud.spanner_dbapi.checksum import ResultsChecksum
+ from google.cloud.spanner_dbapi.cursor import Statement
+
+ sql = "INSERT INTO T (f1, f2) VALUES (1, 2)"
+ params = None
+ param_types = None
+
+ connection = self._make_connection()
+
+ statement = Statement(sql, params, param_types, ResultsChecksum(), True)
+ with mock.patch(
+ "google.cloud.spanner_dbapi.connection.Connection.transaction_checkout"
+ ):
+ connection.run_statement(statement, retried=True)
+
+ self.assertEqual(len(connection._statements), 0)
+
def test_run_statement_w_homogeneous_insert_statements(self):
"""Check that Connection executed homogeneous insert statements."""
from google.cloud.spanner_dbapi.checksum import ResultsChecksum
@@ -582,3 +605,132 @@ def test_retry_aborted_retry(self):
mock.call(statement, retried=True),
)
)
+
+ def test_retry_transaction_raise_max_internal_retries(self):
+ """Check retrying raise an error of max internal retries."""
+ from google.cloud.spanner_dbapi import connection as conn
+ from google.cloud.spanner_dbapi.checksum import ResultsChecksum
+ from google.cloud.spanner_dbapi.cursor import Statement
+
+ conn.MAX_INTERNAL_RETRIES = 0
+ row = ["field1", "field2"]
+ connection = self._make_connection()
+
+ checksum = ResultsChecksum()
+ checksum.consume_result(row)
+
+ statement = Statement("SELECT 1", [], {}, checksum, False)
+ connection._statements.append(statement)
+
+ with self.assertRaises(Exception):
+ connection.retry_transaction()
+
+ conn.MAX_INTERNAL_RETRIES = 50
+
+ def test_retry_aborted_retry_without_delay(self):
+ """
+ Check that in case of a retried transaction failed,
+ the connection will retry it once again.
+ """
+ from google.api_core.exceptions import Aborted
+ from google.cloud.spanner_dbapi.checksum import ResultsChecksum
+ from google.cloud.spanner_dbapi.connection import connect
+ from google.cloud.spanner_dbapi.cursor import Statement
+
+ row = ["field1", "field2"]
+
+ with mock.patch(
+ "google.cloud.spanner_v1.instance.Instance.exists", return_value=True,
+ ):
+ with mock.patch(
+ "google.cloud.spanner_v1.database.Database.exists", return_value=True,
+ ):
+ connection = connect("test-instance", "test-database")
+
+ cursor = connection.cursor()
+ cursor._checksum = ResultsChecksum()
+ cursor._checksum.consume_result(row)
+
+ statement = Statement("SELECT 1", [], {}, cursor._checksum, False)
+ connection._statements.append(statement)
+
+ metadata_mock = mock.Mock()
+ metadata_mock.trailing_metadata.return_value = {}
+
+ with mock.patch(
+ "google.cloud.spanner_dbapi.connection.Connection.run_statement",
+ side_effect=(
+ Aborted("Aborted", errors=[metadata_mock]),
+ ([row], ResultsChecksum()),
+ ),
+ ) as retry_mock:
+ with mock.patch(
+ "google.cloud.spanner_dbapi.connection._get_retry_delay",
+ return_value=False,
+ ):
+ connection.retry_transaction()
+
+ retry_mock.assert_has_calls(
+ (
+ mock.call(statement, retried=True),
+ mock.call(statement, retried=True),
+ )
+ )
+
+ def test_retry_transaction_w_multiple_statement(self):
+ """Check retrying an aborted transaction."""
+ from google.cloud.spanner_dbapi.checksum import ResultsChecksum
+ from google.cloud.spanner_dbapi.cursor import Statement
+
+ row = ["field1", "field2"]
+ connection = self._make_connection()
+
+ checksum = ResultsChecksum()
+ checksum.consume_result(row)
+ retried_checksum = ResultsChecksum()
+
+ statement = Statement("SELECT 1", [], {}, checksum, False)
+ statement1 = Statement("SELECT 2", [], {}, checksum, False)
+ connection._statements.append(statement)
+ connection._statements.append(statement1)
+
+ with mock.patch(
+ "google.cloud.spanner_dbapi.connection.Connection.run_statement",
+ return_value=([row], retried_checksum),
+ ) as run_mock:
+ with mock.patch(
+ "google.cloud.spanner_dbapi.connection._compare_checksums"
+ ) as compare_mock:
+ connection.retry_transaction()
+
+ compare_mock.assert_called_with(checksum, retried_checksum)
+
+ run_mock.assert_called_with(statement1, retried=True)
+
+ def test_retry_transaction_w_empty_response(self):
+ """Check retrying an aborted transaction."""
+ from google.cloud.spanner_dbapi.checksum import ResultsChecksum
+ from google.cloud.spanner_dbapi.cursor import Statement
+
+ row = []
+ connection = self._make_connection()
+
+ checksum = ResultsChecksum()
+ checksum.count = 1
+ retried_checksum = ResultsChecksum()
+
+ statement = Statement("SELECT 1", [], {}, checksum, False)
+ connection._statements.append(statement)
+
+ with mock.patch(
+ "google.cloud.spanner_dbapi.connection.Connection.run_statement",
+ return_value=(row, retried_checksum),
+ ) as run_mock:
+ with mock.patch(
+ "google.cloud.spanner_dbapi.connection._compare_checksums"
+ ) as compare_mock:
+ connection.retry_transaction()
+
+ compare_mock.assert_called_with(checksum, retried_checksum)
+
+ run_mock.assert_called_with(statement, retried=True)
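The retry tests above all exercise the same loop: each recorded statement is replayed, its rows are folded into a fresh checksum, and that checksum must match the one from the original run. A pure-Python sketch of that contract, assuming illustrative names (`retry_statements`, `compare_checksums`); the real logic lives in `Connection.retry_transaction`, which additionally sleeps for the delay carried by the Aborted error unless `_get_retry_delay` yields none:

    from google.api_core.exceptions import Aborted

    MAX_INTERNAL_RETRIES = 50  # the constant the first test temporarily zeroes

    def retry_statements(run_statement, compare_checksums, statements):
        """Replay statements until each succeeds or the retry budget is spent."""
        for statement in statements:
            for _attempt in range(MAX_INTERNAL_RETRIES):
                try:
                    rows, retried_checksum = run_statement(statement, retried=True)
                except Aborted:
                    continue  # aborted again: replay this statement
                for row in rows:
                    retried_checksum.consume_result(row)
                # Raises if the replayed results diverge from the original run.
                compare_checksums(statement.checksum, retried_checksum)
                break
            else:
                raise Exception("statement exceeded MAX_INTERNAL_RETRIES")

With the retry budget set to zero, as in test_retry_transaction_raise_max_internal_retries, the loop body never runs and the sketch raises immediately, matching the asserted behaviour.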
diff --git a/tests/unit/spanner_dbapi/test_cursor.py b/tests/unit/spanner_dbapi/test_cursor.py
index c83dcb5e10..4d5db01eac 100644
--- a/tests/unit/spanner_dbapi/test_cursor.py
+++ b/tests/unit/spanner_dbapi/test_cursor.py
@@ -140,6 +140,31 @@ def test_execute_autocommit_off(self):
self.assertIsInstance(cursor._result_set, mock.MagicMock)
self.assertIsInstance(cursor._itr, PeekIterator)
+ def test_execute_insert_statement_autocommit_off(self):
+ from google.cloud.spanner_dbapi import parse_utils
+ from google.cloud.spanner_dbapi.checksum import ResultsChecksum
+ from google.cloud.spanner_dbapi.utils import PeekIterator
+
+ connection = self._make_connection(self.INSTANCE, mock.MagicMock())
+ cursor = self._make_one(connection)
+ cursor.connection._autocommit = False
+ cursor.connection.transaction_checkout = mock.MagicMock(autospec=True)
+
+ cursor._checksum = ResultsChecksum()
+ with mock.patch(
+ "google.cloud.spanner_dbapi.parse_utils.classify_stmt",
+ return_value=parse_utils.STMT_INSERT,
+ ):
+ with mock.patch(
+ "google.cloud.spanner_dbapi.connection.Connection.run_statement",
+ return_value=(mock.MagicMock(), ResultsChecksum()),
+ ):
+ cursor.execute(
+ sql="INSERT INTO django_migrations (app, name, applied) VALUES (%s, %s, %s)"
+ )
+ self.assertIsInstance(cursor._result_set, mock.MagicMock)
+ self.assertIsInstance(cursor._itr, PeekIterator)
+
def test_execute_statement(self):
from google.cloud.spanner_dbapi import parse_utils
@@ -837,3 +862,33 @@ def test_fetchmany_retry_aborted_statements_checksums_mismatch(self):
cursor.fetchmany(len(row))
run_mock.assert_called_with(statement, retried=True)
+
+ def test_ddls_with_semicolon(self):
+ """
+ Check that one script with several DDL statements separated
+ with semicolons is splitted into several DDLs.
+ """
+ from google.cloud.spanner_dbapi.connection import connect
+
+ EXP_DDLS = [
+ "CREATE TABLE table_name (row_id INT64) PRIMARY KEY ()",
+ "DROP INDEX index_name",
+ "DROP TABLE table_name",
+ ]
+
+ with mock.patch(
+ "google.cloud.spanner_v1.instance.Instance.exists", return_value=True,
+ ):
+ with mock.patch(
+ "google.cloud.spanner_v1.database.Database.exists", return_value=True,
+ ):
+ connection = connect("test-instance", "test-database")
+
+ cursor = connection.cursor()
+ cursor.execute(
+ "CREATE TABLE table_name (row_id INT64) PRIMARY KEY ();"
+ "DROP INDEX index_name;\n"
+ "DROP TABLE table_name;"
+ )
+
+ self.assertEqual(connection._ddl_statements, EXP_DDLS)
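test_ddls_with_semicolon fixes the splitting behaviour; the sketch below reproduces it in isolation (the helper name is ours, the real splitting happens inside Cursor.execute):

    def split_ddls(script):
        # Split on semicolons and drop empty or whitespace-only fragments,
        # such as the one left after the trailing semicolon.
        return [stmt.strip() for stmt in script.split(";") if stmt.strip()]

    assert split_ddls(
        "CREATE TABLE table_name (row_id INT64) PRIMARY KEY ();"
        "DROP INDEX index_name;\n"
        "DROP TABLE table_name;"
    ) == [
        "CREATE TABLE table_name (row_id INT64) PRIMARY KEY ()",
        "DROP INDEX index_name",
        "DROP TABLE table_name",
    ]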
diff --git a/tests/unit/spanner_dbapi/test_parse_utils.py b/tests/unit/spanner_dbapi/test_parse_utils.py
index 6338f39e5d..3713ac11a8 100644
--- a/tests/unit/spanner_dbapi/test_parse_utils.py
+++ b/tests/unit/spanner_dbapi/test_parse_utils.py
@@ -72,20 +72,32 @@ def test_parse_insert(self):
"INSERT INTO django_migrations (app, name, applied) VALUES (%s, %s, %s)",
[1, 2, 3, 4, 5, 6],
{
- "homogenous": True,
- "table": "django_migrations",
- "columns": ["app", "name", "applied"],
- "values": [(1, 2, 3), (4, 5, 6)],
+ "sql_params_list": [
+ (
+ "INSERT INTO django_migrations (app, name, applied) VALUES (%s, %s, %s)",
+ (1, 2, 3),
+ ),
+ (
+ "INSERT INTO django_migrations (app, name, applied) VALUES (%s, %s, %s)",
+ (4, 5, 6),
+ ),
+ ]
},
),
(
"INSERT INTO django_migrations(app, name, applied) VALUES (%s, %s, %s)",
[1, 2, 3, 4, 5, 6],
{
- "homogenous": True,
- "table": "django_migrations",
- "columns": ["app", "name", "applied"],
- "values": [(1, 2, 3), (4, 5, 6)],
+ "sql_params_list": [
+ (
+ "INSERT INTO django_migrations (app, name, applied) VALUES (%s, %s, %s)",
+ (1, 2, 3),
+ ),
+ (
+ "INSERT INTO django_migrations (app, name, applied) VALUES (%s, %s, %s)",
+ (4, 5, 6),
+ ),
+ ]
},
),
(
@@ -106,23 +118,25 @@ def test_parse_insert(self):
),
(
"INSERT INTO ap (n, ct, cn) "
- "VALUES (%s, %s, %s), (%s, %s, %s), (%s, %s, %s),(%s,%s, %s)",
+ "VALUES (%s, %s, %s), (%s, %s, %s), (%s, %s, %s),(%s, %s, %s)",
(1, 2, 3, 4, 5, 6, 7, 8, 9),
{
- "homogenous": True,
- "table": "ap",
- "columns": ["n", "ct", "cn"],
- "values": [(1, 2, 3), (4, 5, 6), (7, 8, 9)],
+ "sql_params_list": [
+ ("INSERT INTO ap (n, ct, cn) VALUES (%s, %s, %s)", (1, 2, 3)),
+ ("INSERT INTO ap (n, ct, cn) VALUES (%s, %s, %s)", (4, 5, 6)),
+ ("INSERT INTO ap (n, ct, cn) VALUES (%s, %s, %s)", (7, 8, 9)),
+ ]
},
),
(
"INSERT INTO `no` (`yes`) VALUES (%s)",
(1, 4, 5),
{
- "homogenous": True,
- "table": "`no`",
- "columns": ["yes"],
- "values": [(1,), (4,), (5,)],
+ "sql_params_list": [
+ ("INSERT INTO `no` (`yes`) VALUES (%s)", (1,)),
+ ("INSERT INTO `no` (`yes`) VALUES (%s)", (4,)),
+ ("INSERT INTO `no` (`yes`) VALUES (%s)", (5,)),
+ ]
},
),
(
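The reworked expectations above capture parse_insert's new contract: instead of the old "homogenous"/"table"/"columns"/"values" breakdown, it returns a "sql_params_list" holding one (sql, params) pair per row. A hedged, regex-based standalone sketch of that transformation; it is illustrative only and skips the whitespace normalization the real parser performs:

    import re

    def explode_insert(sql, params):
        # How many placeholders make up one row?
        match = re.search(r"VALUES\s*\((.+?)\)", sql, flags=re.IGNORECASE)
        row_width = match.group(1).count("%s")
        # Rewrite the statement down to a single-row VALUES clause.
        one_row = "VALUES (" + ", ".join(["%s"] * row_width) + ")"
        single_row_sql = re.sub(r"VALUES\s*\(.+\)", one_row, sql, flags=re.IGNORECASE)
        rows = [
            tuple(params[i : i + row_width])
            for i in range(0, len(params), row_width)
        ]
        return {"sql_params_list": [(single_row_sql, row) for row in rows]}

    out = explode_insert("INSERT INTO `no` (`yes`) VALUES (%s)", (1, 4, 5))
    assert out["sql_params_list"] == [
        ("INSERT INTO `no` (`yes`) VALUES (%s)", (1,)),
        ("INSERT INTO `no` (`yes`) VALUES (%s)", (4,)),
        ("INSERT INTO `no` (`yes`) VALUES (%s)", (5,)),
    ]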
diff --git a/tests/unit/spanner_dbapi/test_utils.py b/tests/unit/spanner_dbapi/test_utils.py
index 4fe94f30a7..76c347d402 100644
--- a/tests/unit/spanner_dbapi/test_utils.py
+++ b/tests/unit/spanner_dbapi/test_utils.py
@@ -85,3 +85,19 @@ def test_backtick_unicode(self):
with self.subTest(sql=sql):
got = backtick_unicode(sql)
self.assertEqual(got, want)
+
+ @unittest.skipIf(skip_condition, skip_message)
+ def test_StreamedManyResultSets(self):
+ from google.cloud.spanner_dbapi.utils import StreamedManyResultSets
+
+ cases = [
+ ("iter_from_list", iter([1, 2, 3, 4, 6, 7]), [1, 2, 3, 4, 6, 7]),
+ ("iter_from_tuple", iter(("a", 12, 0xFF)), ["a", 12, 0xFF]),
+ ]
+
+ for name, data_in, expected in cases:
+ with self.subTest(name=name):
+ stream_result = StreamedManyResultSets()
+ stream_result._iterators.append(data_in)
+ actual = list(stream_result)
+ self.assertEqual(actual, expected)
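The new test documents StreamedManyResultSets as a plain chain over per-statement row iterators; a standalone sketch of that behaviour (the class name below is ours):

    class StreamedManyResultSetsSketch:
        """Yield the rows of several result-set iterators back to back."""

        def __init__(self):
            self._iterators = []

        def __iter__(self):
            for iterator in self._iterators:
                for item in iterator:
                    yield item

    stream = StreamedManyResultSetsSketch()
    stream._iterators.append(iter([1, 2, 3]))
    stream._iterators.append(iter(("a", 0xFF)))
    assert list(stream) == [1, 2, 3, "a", 255]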
diff --git a/tests/unit/test_backup.py b/tests/unit/test_backup.py
index bf6ce68a84..e80e455dbf 100644
--- a/tests/unit/test_backup.py
+++ b/tests/unit/test_backup.py
@@ -62,18 +62,52 @@ def test_ctor_defaults(self):
self.assertIsNone(backup._expire_time)
def test_ctor_non_defaults(self):
+ from google.cloud.spanner_admin_database_v1 import CreateBackupEncryptionConfig
+
instance = _Instance(self.INSTANCE_NAME)
timestamp = self._make_timestamp()
+ encryption_config = CreateBackupEncryptionConfig(
+ encryption_type=CreateBackupEncryptionConfig.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION,
+ kms_key_name="key_name",
+ )
backup = self._make_one(
- self.BACKUP_ID, instance, database=self.DATABASE_NAME, expire_time=timestamp
+ self.BACKUP_ID,
+ instance,
+ database=self.DATABASE_NAME,
+ expire_time=timestamp,
+ encryption_config=encryption_config,
+ )
+
+ self.assertEqual(backup.backup_id, self.BACKUP_ID)
+ self.assertIs(backup._instance, instance)
+ self.assertEqual(backup._database, self.DATABASE_NAME)
+ self.assertIsNotNone(backup._expire_time)
+ self.assertIs(backup._expire_time, timestamp)
+ self.assertEqual(backup._encryption_config, encryption_config)
+
+ def test_ctor_w_encryption_config_dict(self):
+ from google.cloud.spanner_admin_database_v1 import CreateBackupEncryptionConfig
+
+ instance = _Instance(self.INSTANCE_NAME)
+ timestamp = self._make_timestamp()
+
+ encryption_config = {"encryption_type": 3, "kms_key_name": "key_name"}
+ backup = self._make_one(
+ self.BACKUP_ID,
+ instance,
+ database=self.DATABASE_NAME,
+ expire_time=timestamp,
+ encryption_config=encryption_config,
)
+ expected_encryption_config = CreateBackupEncryptionConfig(**encryption_config)
self.assertEqual(backup.backup_id, self.BACKUP_ID)
self.assertIs(backup._instance, instance)
self.assertEqual(backup._database, self.DATABASE_NAME)
self.assertIsNotNone(backup._expire_time)
self.assertIs(backup._expire_time, timestamp)
+ self.assertEqual(backup._encryption_config, expected_encryption_config)
def test_from_pb_project_mismatch(self):
from google.cloud.spanner_admin_database_v1 import Backup
@@ -170,10 +204,32 @@ def test_referencing_databases_property(self):
expected = backup._referencing_databases = [self.DATABASE_NAME]
self.assertEqual(backup.referencing_databases, expected)
+ def test_encryption_info_property(self):
+ from google.cloud.spanner_admin_database_v1 import EncryptionInfo
+
+ instance = _Instance(self.INSTANCE_NAME)
+ backup = self._make_one(self.BACKUP_ID, instance)
+ expected = backup._encryption_info = EncryptionInfo(
+ kms_key_version="kms_key_version"
+ )
+ self.assertEqual(backup.encryption_info, expected)
+
+ def test_encryption_config_property(self):
+ from google.cloud.spanner_admin_database_v1 import CreateBackupEncryptionConfig
+
+ instance = _Instance(self.INSTANCE_NAME)
+ backup = self._make_one(self.BACKUP_ID, instance)
+ expected = backup._encryption_config = CreateBackupEncryptionConfig(
+ encryption_type=CreateBackupEncryptionConfig.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION,
+ kms_key_name="kms_key_name",
+ )
+ self.assertEqual(backup._encryption_config, expected)
+
def test_create_grpc_error(self):
from google.api_core.exceptions import GoogleAPICallError
from google.api_core.exceptions import Unknown
from google.cloud.spanner_admin_database_v1 import Backup
+ from google.cloud.spanner_admin_database_v1 import CreateBackupRequest
client = _Client()
api = client.database_admin_api = self._make_database_admin_api()
@@ -190,16 +246,18 @@ def test_create_grpc_error(self):
with self.assertRaises(GoogleAPICallError):
backup.create()
+ request = CreateBackupRequest(
+ parent=self.INSTANCE_NAME, backup_id=self.BACKUP_ID, backup=backup_pb,
+ )
+
api.create_backup.assert_called_once_with(
- parent=self.INSTANCE_NAME,
- backup_id=self.BACKUP_ID,
- backup=backup_pb,
- metadata=[("google-cloud-resource-prefix", backup.name)],
+ request=request, metadata=[("google-cloud-resource-prefix", backup.name)],
)
def test_create_already_exists(self):
from google.cloud.exceptions import Conflict
from google.cloud.spanner_admin_database_v1 import Backup
+ from google.cloud.spanner_admin_database_v1 import CreateBackupRequest
client = _Client()
api = client.database_admin_api = self._make_database_admin_api()
@@ -216,16 +274,18 @@ def test_create_already_exists(self):
with self.assertRaises(Conflict):
backup.create()
+ request = CreateBackupRequest(
+ parent=self.INSTANCE_NAME, backup_id=self.BACKUP_ID, backup=backup_pb,
+ )
+
api.create_backup.assert_called_once_with(
- parent=self.INSTANCE_NAME,
- backup_id=self.BACKUP_ID,
- backup=backup_pb,
- metadata=[("google-cloud-resource-prefix", backup.name)],
+ request=request, metadata=[("google-cloud-resource-prefix", backup.name)],
)
def test_create_instance_not_found(self):
from google.cloud.exceptions import NotFound
from google.cloud.spanner_admin_database_v1 import Backup
+ from google.cloud.spanner_admin_database_v1 import CreateBackupRequest
client = _Client()
api = client.database_admin_api = self._make_database_admin_api()
@@ -242,11 +302,12 @@ def test_create_instance_not_found(self):
with self.assertRaises(NotFound):
backup.create()
+ request = CreateBackupRequest(
+ parent=self.INSTANCE_NAME, backup_id=self.BACKUP_ID, backup=backup_pb,
+ )
+
api.create_backup.assert_called_once_with(
- parent=self.INSTANCE_NAME,
- backup_id=self.BACKUP_ID,
- backup=backup_pb,
- metadata=[("google-cloud-resource-prefix", backup.name)],
+ request=request, metadata=[("google-cloud-resource-prefix", backup.name)],
)
def test_create_expire_time_not_set(self):
@@ -266,6 +327,8 @@ def test_create_database_not_set(self):
def test_create_success(self):
from google.cloud.spanner_admin_database_v1 import Backup
+ from google.cloud.spanner_admin_database_v1 import CreateBackupRequest
+ from google.cloud.spanner_admin_database_v1 import CreateBackupEncryptionConfig
from datetime import datetime
from datetime import timedelta
from pytz import UTC
@@ -279,12 +342,14 @@ def test_create_success(self):
version_timestamp = datetime.utcnow() - timedelta(minutes=5)
version_timestamp = version_timestamp.replace(tzinfo=UTC)
expire_timestamp = self._make_timestamp()
+ encryption_config = {"encryption_type": 3, "kms_key_name": "key_name"}
backup = self._make_one(
self.BACKUP_ID,
instance,
database=self.DATABASE_NAME,
expire_time=expire_timestamp,
version_time=version_timestamp,
+ encryption_config=encryption_config,
)
backup_pb = Backup(
@@ -296,13 +361,39 @@ def test_create_success(self):
future = backup.create()
self.assertIs(future, op_future)
- api.create_backup.assert_called_once_with(
+ expected_encryption_config = CreateBackupEncryptionConfig(**encryption_config)
+ request = CreateBackupRequest(
parent=self.INSTANCE_NAME,
backup_id=self.BACKUP_ID,
backup=backup_pb,
- metadata=[("google-cloud-resource-prefix", backup.name)],
+ encryption_config=expected_encryption_config,
)
+ api.create_backup.assert_called_once_with(
+ request=request, metadata=[("google-cloud-resource-prefix", backup.name)],
+ )
+
+ def test_create_w_invalid_encryption_config(self):
+ from google.cloud.spanner_admin_database_v1 import CreateBackupEncryptionConfig
+
+ client = _Client()
+ instance = _Instance(self.INSTANCE_NAME, client=client)
+ expire_timestamp = self._make_timestamp()
+ encryption_config = {
+ "encryption_type": CreateBackupEncryptionConfig.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION,
+ "kms_key_name": "key_name",
+ }
+ backup = self._make_one(
+ self.BACKUP_ID,
+ instance,
+ database=self.DATABASE_NAME,
+ expire_time=expire_timestamp,
+ encryption_config=encryption_config,
+ )
+
+ with self.assertRaises(ValueError):
+ backup.create()
+
def test_exists_grpc_error(self):
from google.api_core.exceptions import Unknown
@@ -442,8 +533,10 @@ def test_reload_not_found(self):
def test_reload_success(self):
from google.cloud.spanner_admin_database_v1 import Backup
+ from google.cloud.spanner_admin_database_v1 import EncryptionInfo
timestamp = self._make_timestamp()
+ encryption_info = EncryptionInfo(kms_key_version="kms_key_version")
client = _Client()
backup_pb = Backup(
@@ -455,6 +548,7 @@ def test_reload_success(self):
size_bytes=10,
state=1,
referencing_databases=[],
+ encryption_info=encryption_info,
)
api = client.database_admin_api = self._make_database_admin_api()
api.get_backup.return_value = backup_pb
@@ -470,6 +564,7 @@ def test_reload_success(self):
self.assertEqual(backup.size_bytes, 10)
self.assertEqual(backup.state, Backup.State.CREATING)
self.assertEqual(backup.referencing_databases, [])
+ self.assertEqual(backup.encryption_info, encryption_info)
api.get_backup.assert_called_once_with(
name=self.BACKUP_NAME,
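Taken together, the backup tests above describe the new encryption surface: an `encryption_config` (message or plain dict) flows through `instance.backup()` into the CreateBackupRequest, and an inconsistent config fails fast. A hedged usage sketch; resource IDs and the KMS key name are placeholders:

    from datetime import datetime, timedelta, timezone

    from google.cloud import spanner
    from google.cloud.spanner_admin_database_v1 import CreateBackupEncryptionConfig

    client = spanner.Client()
    instance = client.instance("test-instance")

    backup = instance.backup(
        "backup-id",
        database="test-database",
        expire_time=datetime.now(timezone.utc) + timedelta(days=7),
        encryption_config={
            "encryption_type": CreateBackupEncryptionConfig.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION,
            "kms_key_name": "projects/p/locations/l/keyRings/r/cryptoKeys/k",
        },
    )
    # Per test_create_w_invalid_encryption_config, pairing
    # GOOGLE_DEFAULT_ENCRYPTION with a kms_key_name raises ValueError here.
    operation = backup.create()
    operation.result()  # wait for the long-running operation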
diff --git a/tests/unit/test_database.py b/tests/unit/test_database.py
index 148bb79b0e..c71bab2581 100644
--- a/tests/unit/test_database.py
+++ b/tests/unit/test_database.py
@@ -16,8 +16,10 @@
import unittest
import mock
+from google.api_core import gapic_v1
from google.cloud.spanner_v1.param_types import INT64
+from google.api_core.retry import Retry
DML_WO_PARAM = """
DELETE FROM citizens
@@ -159,6 +161,18 @@ def test_ctor_w_explicit_logger(self):
self.assertFalse(database.log_commit_stats)
self.assertEqual(database._logger, logger)
+ def test_ctor_w_encryption_config(self):
+ from google.cloud.spanner_admin_database_v1 import EncryptionConfig
+
+ instance = _Instance(self.INSTANCE_NAME)
+ encryption_config = EncryptionConfig(kms_key_name="kms_key")
+ database = self._make_one(
+ self.DATABASE_ID, instance, encryption_config=encryption_config
+ )
+ self.assertEqual(database.database_id, self.DATABASE_ID)
+ self.assertIs(database._instance, instance)
+ self.assertEqual(database._encryption_config, encryption_config)
+
def test_from_pb_bad_database_name(self):
from google.cloud.spanner_admin_database_v1 import Database
@@ -295,6 +309,28 @@ def test_logger_property_custom(self):
logger = database._logger = mock.create_autospec(logging.Logger, instance=True)
self.assertEqual(database.logger, logger)
+ def test_encryption_config(self):
+ from google.cloud.spanner_admin_database_v1 import EncryptionConfig
+
+ instance = _Instance(self.INSTANCE_NAME)
+ pool = _Pool()
+ database = self._make_one(self.DATABASE_ID, instance, pool=pool)
+ encryption_config = database._encryption_config = mock.create_autospec(
+ EncryptionConfig, instance=True
+ )
+ self.assertEqual(database.encryption_config, encryption_config)
+
+ def test_encryption_info(self):
+ from google.cloud.spanner_admin_database_v1 import EncryptionInfo
+
+ instance = _Instance(self.INSTANCE_NAME)
+ pool = _Pool()
+ database = self._make_one(self.DATABASE_ID, instance, pool=pool)
+ encryption_info = database._encryption_info = [
+ mock.create_autospec(EncryptionInfo, instance=True)
+ ]
+ self.assertEqual(database.encryption_info, encryption_info)
+
def test_spanner_api_property_w_scopeless_creds(self):
client = _Client()
@@ -432,6 +468,7 @@ def test_create_grpc_error(self):
parent=self.INSTANCE_NAME,
create_statement="CREATE DATABASE {}".format(self.DATABASE_ID),
extra_statements=[],
+ encryption_config=None,
)
api.create_database.assert_called_once_with(
@@ -458,6 +495,7 @@ def test_create_already_exists(self):
parent=self.INSTANCE_NAME,
create_statement="CREATE DATABASE `{}`".format(DATABASE_ID_HYPHEN),
extra_statements=[],
+ encryption_config=None,
)
api.create_database.assert_called_once_with(
@@ -483,6 +521,7 @@ def test_create_instance_not_found(self):
parent=self.INSTANCE_NAME,
create_statement="CREATE DATABASE {}".format(self.DATABASE_ID),
extra_statements=[],
+ encryption_config=None,
)
api.create_database.assert_called_once_with(
@@ -493,6 +532,7 @@ def test_create_instance_not_found(self):
def test_create_success(self):
from tests._fixtures import DDL_STATEMENTS
from google.cloud.spanner_admin_database_v1 import CreateDatabaseRequest
+ from google.cloud.spanner_admin_database_v1 import EncryptionConfig
op_future = object()
client = _Client()
@@ -500,18 +540,61 @@ def test_create_success(self):
api.create_database.return_value = op_future
instance = _Instance(self.INSTANCE_NAME, client=client)
pool = _Pool()
+ encryption_config = EncryptionConfig(kms_key_name="kms_key_name")
database = self._make_one(
- self.DATABASE_ID, instance, ddl_statements=DDL_STATEMENTS, pool=pool
+ self.DATABASE_ID,
+ instance,
+ ddl_statements=DDL_STATEMENTS,
+ pool=pool,
+ encryption_config=encryption_config,
+ )
+
+ future = database.create()
+
+ self.assertIs(future, op_future)
+
+ expected_request = CreateDatabaseRequest(
+ parent=self.INSTANCE_NAME,
+ create_statement="CREATE DATABASE {}".format(self.DATABASE_ID),
+ extra_statements=DDL_STATEMENTS,
+ encryption_config=encryption_config,
+ )
+
+ api.create_database.assert_called_once_with(
+ request=expected_request,
+ metadata=[("google-cloud-resource-prefix", database.name)],
+ )
+
+ def test_create_success_w_encryption_config_dict(self):
+ from tests._fixtures import DDL_STATEMENTS
+ from google.cloud.spanner_admin_database_v1 import CreateDatabaseRequest
+ from google.cloud.spanner_admin_database_v1 import EncryptionConfig
+
+ op_future = object()
+ client = _Client()
+ api = client.database_admin_api = self._make_database_admin_api()
+ api.create_database.return_value = op_future
+ instance = _Instance(self.INSTANCE_NAME, client=client)
+ pool = _Pool()
+ encryption_config = {"kms_key_name": "kms_key_name"}
+ database = self._make_one(
+ self.DATABASE_ID,
+ instance,
+ ddl_statements=DDL_STATEMENTS,
+ pool=pool,
+ encryption_config=encryption_config,
)
future = database.create()
self.assertIs(future, op_future)
+ expected_encryption_config = EncryptionConfig(**encryption_config)
expected_request = CreateDatabaseRequest(
parent=self.INSTANCE_NAME,
create_statement="CREATE DATABASE {}".format(self.DATABASE_ID),
extra_statements=DDL_STATEMENTS,
+ encryption_config=expected_encryption_config,
)
api.create_database.assert_called_once_with(
@@ -611,6 +694,8 @@ def test_reload_not_found(self):
def test_reload_success(self):
from google.cloud.spanner_admin_database_v1 import Database
+ from google.cloud.spanner_admin_database_v1 import EncryptionConfig
+ from google.cloud.spanner_admin_database_v1 import EncryptionInfo
from google.cloud.spanner_admin_database_v1 import GetDatabaseDdlResponse
from google.cloud.spanner_admin_database_v1 import RestoreInfo
from google.cloud._helpers import _datetime_to_pb_timestamp
@@ -621,6 +706,13 @@ def test_reload_success(self):
client = _Client()
ddl_pb = GetDatabaseDdlResponse(statements=DDL_STATEMENTS)
+ encryption_config = EncryptionConfig(kms_key_name="kms_key")
+ encryption_info = [
+ EncryptionInfo(
+ encryption_type=EncryptionInfo.Type.CUSTOMER_MANAGED_ENCRYPTION,
+ kms_key_version="kms_key_version",
+ )
+ ]
api = client.database_admin_api = self._make_database_admin_api()
api.get_database_ddl.return_value = ddl_pb
db_pb = Database(
@@ -629,6 +721,8 @@ def test_reload_success(self):
restore_info=restore_info,
version_retention_period="1d",
earliest_version_time=_datetime_to_pb_timestamp(timestamp),
+ encryption_config=encryption_config,
+ encryption_info=encryption_info,
)
api.get_database.return_value = db_pb
instance = _Instance(self.INSTANCE_NAME, client=client)
@@ -642,6 +736,8 @@ def test_reload_success(self):
self.assertEqual(database._version_retention_period, "1d")
self.assertEqual(database._earliest_version_time, timestamp)
self.assertEqual(database._ddl_statements, tuple(DDL_STATEMENTS))
+ self.assertEqual(database._encryption_config, encryption_config)
+ self.assertEqual(database._encryption_info, encryption_info)
api.get_database_ddl.assert_called_once_with(
database=self.DATABASE_NAME,
@@ -1128,6 +1224,7 @@ def test_restore_backup_unspecified(self):
def test_restore_grpc_error(self):
from google.api_core.exceptions import Unknown
+ from google.cloud.spanner_admin_database_v1 import RestoreDatabaseRequest
client = _Client()
api = client.database_admin_api = self._make_database_admin_api()
@@ -1140,15 +1237,20 @@ def test_restore_grpc_error(self):
with self.assertRaises(Unknown):
database.restore(backup)
- api.restore_database.assert_called_once_with(
+ expected_request = RestoreDatabaseRequest(
parent=self.INSTANCE_NAME,
database_id=self.DATABASE_ID,
backup=self.BACKUP_NAME,
+ )
+
+ api.restore_database.assert_called_once_with(
+ request=expected_request,
metadata=[("google-cloud-resource-prefix", database.name)],
)
def test_restore_not_found(self):
from google.api_core.exceptions import NotFound
+ from google.cloud.spanner_admin_database_v1 import RestoreDatabaseRequest
client = _Client()
api = client.database_admin_api = self._make_database_admin_api()
@@ -1161,34 +1263,115 @@ def test_restore_not_found(self):
with self.assertRaises(NotFound):
database.restore(backup)
- api.restore_database.assert_called_once_with(
+ expected_request = RestoreDatabaseRequest(
parent=self.INSTANCE_NAME,
database_id=self.DATABASE_ID,
backup=self.BACKUP_NAME,
+ )
+
+ api.restore_database.assert_called_once_with(
+ request=expected_request,
metadata=[("google-cloud-resource-prefix", database.name)],
)
def test_restore_success(self):
+ from google.cloud.spanner_admin_database_v1 import (
+ RestoreDatabaseEncryptionConfig,
+ )
+ from google.cloud.spanner_admin_database_v1 import RestoreDatabaseRequest
+
op_future = object()
client = _Client()
api = client.database_admin_api = self._make_database_admin_api()
api.restore_database.return_value = op_future
instance = _Instance(self.INSTANCE_NAME, client=client)
pool = _Pool()
- database = self._make_one(self.DATABASE_ID, instance, pool=pool)
+ encryption_config = RestoreDatabaseEncryptionConfig(
+ encryption_type=RestoreDatabaseEncryptionConfig.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION,
+ kms_key_name="kms_key_name",
+ )
+ database = self._make_one(
+ self.DATABASE_ID, instance, pool=pool, encryption_config=encryption_config
+ )
backup = _Backup(self.BACKUP_NAME)
future = database.restore(backup)
self.assertIs(future, op_future)
+ expected_request = RestoreDatabaseRequest(
+ parent=self.INSTANCE_NAME,
+ database_id=self.DATABASE_ID,
+ backup=self.BACKUP_NAME,
+ encryption_config=encryption_config,
+ )
+
api.restore_database.assert_called_once_with(
+ request=expected_request,
+ metadata=[("google-cloud-resource-prefix", database.name)],
+ )
+
+ def test_restore_success_w_encryption_config_dict(self):
+ from google.cloud.spanner_admin_database_v1 import (
+ RestoreDatabaseEncryptionConfig,
+ )
+ from google.cloud.spanner_admin_database_v1 import RestoreDatabaseRequest
+
+ op_future = object()
+ client = _Client()
+ api = client.database_admin_api = self._make_database_admin_api()
+ api.restore_database.return_value = op_future
+ instance = _Instance(self.INSTANCE_NAME, client=client)
+ pool = _Pool()
+ encryption_config = {
+ "encryption_type": RestoreDatabaseEncryptionConfig.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION,
+ "kms_key_name": "kms_key_name",
+ }
+ database = self._make_one(
+ self.DATABASE_ID, instance, pool=pool, encryption_config=encryption_config
+ )
+ backup = _Backup(self.BACKUP_NAME)
+
+ future = database.restore(backup)
+
+ self.assertIs(future, op_future)
+
+ expected_encryption_config = RestoreDatabaseEncryptionConfig(
+ encryption_type=RestoreDatabaseEncryptionConfig.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION,
+ kms_key_name="kms_key_name",
+ )
+ expected_request = RestoreDatabaseRequest(
parent=self.INSTANCE_NAME,
database_id=self.DATABASE_ID,
backup=self.BACKUP_NAME,
+ encryption_config=expected_encryption_config,
+ )
+
+ api.restore_database.assert_called_once_with(
+ request=expected_request,
metadata=[("google-cloud-resource-prefix", database.name)],
)
+ def test_restore_w_invalid_encryption_config_dict(self):
+ from google.cloud.spanner_admin_database_v1 import (
+ RestoreDatabaseEncryptionConfig,
+ )
+
+ client = _Client()
+ instance = _Instance(self.INSTANCE_NAME, client=client)
+ pool = _Pool()
+ encryption_config = {
+ "encryption_type": RestoreDatabaseEncryptionConfig.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION,
+ "kms_key_name": "kms_key_name",
+ }
+ database = self._make_one(
+ self.DATABASE_ID, instance, pool=pool, encryption_config=encryption_config
+ )
+ backup = _Backup(self.BACKUP_NAME)
+
+ with self.assertRaises(ValueError):
+ database.restore(backup)
+
def test_is_ready(self):
from google.cloud.spanner_admin_database_v1 import Database
@@ -1768,6 +1951,49 @@ def test_generate_read_batches_w_max_partitions(self):
index="",
partition_size_bytes=None,
max_partitions=max_partitions,
+ retry=gapic_v1.method.DEFAULT,
+ timeout=gapic_v1.method.DEFAULT,
+ )
+
+ def test_generate_read_batches_w_retry_and_timeout_params(self):
+ max_partitions = len(self.TOKENS)
+ keyset = self._make_keyset()
+ database = self._make_database()
+ batch_txn = self._make_one(database)
+ snapshot = batch_txn._snapshot = self._make_snapshot()
+ snapshot.partition_read.return_value = self.TOKENS
+ retry = Retry(deadline=60)
+ batches = list(
+ batch_txn.generate_read_batches(
+ self.TABLE,
+ self.COLUMNS,
+ keyset,
+ max_partitions=max_partitions,
+ retry=retry,
+ timeout=2.0,
+ )
+ )
+
+ expected_read = {
+ "table": self.TABLE,
+ "columns": self.COLUMNS,
+ "keyset": {"all": True},
+ "index": "",
+ }
+ self.assertEqual(len(batches), len(self.TOKENS))
+ for batch, token in zip(batches, self.TOKENS):
+ self.assertEqual(batch["partition"], token)
+ self.assertEqual(batch["read"], expected_read)
+
+ snapshot.partition_read.assert_called_once_with(
+ table=self.TABLE,
+ columns=self.COLUMNS,
+ keyset=keyset,
+ index="",
+ partition_size_bytes=None,
+ max_partitions=max_partitions,
+ retry=retry,
+ timeout=2.0,
)
def test_generate_read_batches_w_index_w_partition_size_bytes(self):
@@ -1806,6 +2032,8 @@ def test_generate_read_batches_w_index_w_partition_size_bytes(self):
index=self.INDEX,
partition_size_bytes=size,
max_partitions=None,
+ retry=gapic_v1.method.DEFAULT,
+ timeout=gapic_v1.method.DEFAULT,
)
def test_process_read_batch(self):
@@ -1835,6 +2063,39 @@ def test_process_read_batch(self):
keyset=keyset,
index=self.INDEX,
partition=token,
+ retry=gapic_v1.method.DEFAULT,
+ timeout=gapic_v1.method.DEFAULT,
+ )
+
+ def test_process_read_batch_w_retry_timeout(self):
+ keyset = self._make_keyset()
+ token = b"TOKEN"
+ batch = {
+ "partition": token,
+ "read": {
+ "table": self.TABLE,
+ "columns": self.COLUMNS,
+ "keyset": {"all": True},
+ "index": self.INDEX,
+ },
+ }
+ database = self._make_database()
+ batch_txn = self._make_one(database)
+ snapshot = batch_txn._snapshot = self._make_snapshot()
+ expected = snapshot.read.return_value = object()
+ retry = Retry(deadline=60)
+ found = batch_txn.process_read_batch(batch, retry=retry, timeout=2.0)
+
+ self.assertIs(found, expected)
+
+ snapshot.read.assert_called_once_with(
+ table=self.TABLE,
+ columns=self.COLUMNS,
+ keyset=keyset,
+ index=self.INDEX,
+ partition=token,
+ retry=retry,
+ timeout=2.0,
)
def test_generate_query_batches_w_max_partitions(self):
@@ -1863,6 +2124,8 @@ def test_generate_query_batches_w_max_partitions(self):
param_types=None,
partition_size_bytes=None,
max_partitions=max_partitions,
+ retry=gapic_v1.method.DEFAULT,
+ timeout=gapic_v1.method.DEFAULT,
)
def test_generate_query_batches_w_params_w_partition_size_bytes(self):
@@ -1902,6 +2165,54 @@ def test_generate_query_batches_w_params_w_partition_size_bytes(self):
param_types=param_types,
partition_size_bytes=size,
max_partitions=None,
+ retry=gapic_v1.method.DEFAULT,
+ timeout=gapic_v1.method.DEFAULT,
+ )
+
+ def test_generate_query_batches_w_retry_and_timeout_params(self):
+ sql = (
+ "SELECT first_name, last_name, email FROM citizens " "WHERE age <= @max_age"
+ )
+ params = {"max_age": 30}
+ param_types = {"max_age": "INT64"}
+ size = 1 << 20
+ client = _Client(self.PROJECT_ID)
+ instance = _Instance(self.INSTANCE_NAME, client=client)
+ database = _Database(self.DATABASE_NAME, instance=instance)
+ batch_txn = self._make_one(database)
+ snapshot = batch_txn._snapshot = self._make_snapshot()
+ snapshot.partition_query.return_value = self.TOKENS
+ retry = Retry(deadline=60)
+ batches = list(
+ batch_txn.generate_query_batches(
+ sql,
+ params=params,
+ param_types=param_types,
+ partition_size_bytes=size,
+ retry=retry,
+ timeout=2.0,
+ )
+ )
+
+ expected_query = {
+ "sql": sql,
+ "params": params,
+ "param_types": param_types,
+ "query_options": client._query_options,
+ }
+ self.assertEqual(len(batches), len(self.TOKENS))
+ for batch, token in zip(batches, self.TOKENS):
+ self.assertEqual(batch["partition"], token)
+ self.assertEqual(batch["query"], expected_query)
+
+ snapshot.partition_query.assert_called_once_with(
+ sql=sql,
+ params=params,
+ param_types=param_types,
+ partition_size_bytes=size,
+ max_partitions=None,
+ retry=retry,
+ timeout=2.0,
)
def test_process_query_batch(self):
@@ -1925,7 +2236,41 @@ def test_process_query_batch(self):
self.assertIs(found, expected)
snapshot.execute_sql.assert_called_once_with(
- sql=sql, params=params, param_types=param_types, partition=token
+ sql=sql,
+ params=params,
+ param_types=param_types,
+ partition=token,
+ retry=gapic_v1.method.DEFAULT,
+ timeout=gapic_v1.method.DEFAULT,
+ )
+
+ def test_process_query_batch_w_retry_timeout(self):
+ sql = (
+ "SELECT first_name, last_name, email FROM citizens " "WHERE age <= @max_age"
+ )
+ params = {"max_age": 30}
+ param_types = {"max_age": "INT64"}
+ token = b"TOKEN"
+ batch = {
+ "partition": token,
+ "query": {"sql": sql, "params": params, "param_types": param_types},
+ }
+ database = self._make_database()
+ batch_txn = self._make_one(database)
+ snapshot = batch_txn._snapshot = self._make_snapshot()
+ expected = snapshot.execute_sql.return_value = object()
+ retry = Retry(deadline=60)
+ found = batch_txn.process_query_batch(batch, retry=retry, timeout=2.0)
+
+ self.assertIs(found, expected)
+
+ snapshot.execute_sql.assert_called_once_with(
+ sql=sql,
+ params=params,
+ param_types=param_types,
+ partition=token,
+ retry=retry,
+ timeout=2.0,
)
def test_close_wo_session(self):
@@ -1979,6 +2324,8 @@ def test_process_w_read_batch(self):
keyset=keyset,
index=self.INDEX,
partition=token,
+ retry=gapic_v1.method.DEFAULT,
+ timeout=gapic_v1.method.DEFAULT,
)
def test_process_w_query_batch(self):
@@ -2002,7 +2349,12 @@ def test_process_w_query_batch(self):
self.assertIs(found, expected)
snapshot.execute_sql.assert_called_once_with(
- sql=sql, params=params, param_types=param_types, partition=token
+ sql=sql,
+ params=params,
+ param_types=param_types,
+ partition=token,
+ retry=gapic_v1.method.DEFAULT,
+ timeout=gapic_v1.method.DEFAULT,
)
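The BatchSnapshot tests above all make the same point: `retry` and `timeout` now flow from the batch helpers straight through to the underlying partition and read/query RPCs, defaulting to `gapic_v1.method.DEFAULT` when omitted. A hedged usage sketch with placeholder resource and table names:

    from google.api_core.retry import Retry
    from google.cloud import spanner
    from google.cloud.spanner_v1.keyset import KeySet

    client = spanner.Client()
    database = client.instance("test-instance").database("test-database")
    batch_txn = database.batch_snapshot()
    retry = Retry(deadline=60)

    for batch in batch_txn.generate_read_batches(
        "citizens", ["email", "age"], KeySet(all_=True), retry=retry, timeout=2.0
    ):
        # The same retry/timeout values are forwarded to the streaming read.
        for row in batch_txn.process_read_batch(batch, retry=retry, timeout=2.0):
            pass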
diff --git a/tests/unit/test_instance.py b/tests/unit/test_instance.py
index c1d02c5728..2ed777b25b 100644
--- a/tests/unit/test_instance.py
+++ b/tests/unit/test_instance.py
@@ -498,9 +498,14 @@ def test_database_factory_explicit(self):
DATABASE_ID = "database-id"
pool = _Pool()
logger = mock.create_autospec(Logger, instance=True)
+ encryption_config = {"kms_key_name": "kms_key_name"}
database = instance.database(
- DATABASE_ID, ddl_statements=DDL_STATEMENTS, pool=pool, logger=logger
+ DATABASE_ID,
+ ddl_statements=DDL_STATEMENTS,
+ pool=pool,
+ logger=logger,
+ encryption_config=encryption_config,
)
self.assertIsInstance(database, Database)
@@ -510,6 +515,7 @@ def test_database_factory_explicit(self):
self.assertIs(database._pool, pool)
self.assertIs(database._logger, logger)
self.assertIs(pool._bound, database)
+ self.assertIs(database._encryption_config, encryption_config)
def test_list_databases(self):
from google.cloud.spanner_admin_database_v1 import Database as DatabasePB
@@ -603,15 +609,23 @@ def test_backup_factory_explicit(self):
import datetime
from google.cloud._helpers import UTC
from google.cloud.spanner_v1.backup import Backup
+ from google.cloud.spanner_admin_database_v1 import CreateBackupEncryptionConfig
client = _Client(self.PROJECT)
instance = self._make_one(self.INSTANCE_ID, client, self.CONFIG_NAME)
BACKUP_ID = "backup-id"
DATABASE_NAME = "database-name"
timestamp = datetime.datetime.utcnow().replace(tzinfo=UTC)
+ encryption_config = CreateBackupEncryptionConfig(
+ encryption_type=CreateBackupEncryptionConfig.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION,
+ kms_key_name="kms_key_name",
+ )
backup = instance.backup(
- BACKUP_ID, database=DATABASE_NAME, expire_time=timestamp
+ BACKUP_ID,
+ database=DATABASE_NAME,
+ expire_time=timestamp,
+ encryption_config=encryption_config,
)
self.assertIsInstance(backup, Backup)
@@ -619,6 +633,7 @@ def test_backup_factory_explicit(self):
self.assertIs(backup._instance, instance)
self.assertEqual(backup._database, DATABASE_NAME)
self.assertIs(backup._expire_time, timestamp)
+ self.assertEqual(backup._encryption_config, encryption_config)
def test_list_backups_defaults(self):
from google.cloud.spanner_admin_database_v1 import Backup as BackupPB
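The factory tests above show that `instance.database()` now forwards an `encryption_config` to the Database object, where a plain dict is coerced to EncryptionConfig at create time. A hedged sketch; IDs, DDL, and the KMS key are placeholders:

    from google.cloud import spanner

    client = spanner.Client()
    instance = client.instance("instance-id")

    database = instance.database(
        "database-id",
        ddl_statements=[
            "CREATE TABLE citizens (email STRING(MAX)) PRIMARY KEY (email)"
        ],
        encryption_config={
            "kms_key_name": "projects/p/locations/l/keyRings/r/cryptoKeys/k"
        },
    )
    operation = database.create()
    operation.result(300)  # block until the create LRO finishes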
diff --git a/tests/unit/test_snapshot.py b/tests/unit/test_snapshot.py
index 2305937204..cc9a67cb4d 100644
--- a/tests/unit/test_snapshot.py
+++ b/tests/unit/test_snapshot.py
@@ -13,7 +13,7 @@
# limitations under the License.
-import google.api_core.gapic_v1.method
+from google.api_core import gapic_v1
import mock
from tests._helpers import (
OpenTelemetryBase,
@@ -21,6 +21,7 @@
HAS_OPENTELEMETRY_INSTALLED,
)
from google.cloud.spanner_v1.param_types import INT64
+from google.api_core.retry import Retry
TABLE_NAME = "citizens"
COLUMNS = ["email", "first_name", "last_name", "age"]
@@ -375,7 +376,15 @@ def test_read_other_error(self):
),
)
- def _read_helper(self, multi_use, first=True, count=0, partition=None):
+ def _read_helper(
+ self,
+ multi_use,
+ first=True,
+ count=0,
+ partition=None,
+ timeout=gapic_v1.method.DEFAULT,
+ retry=gapic_v1.method.DEFAULT,
+ ):
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner_v1 import (
PartialResultSet,
@@ -426,11 +435,23 @@ def _read_helper(self, multi_use, first=True, count=0, partition=None):
if partition is not None: # 'limit' and 'partition' incompatible
result_set = derived.read(
- TABLE_NAME, COLUMNS, keyset, index=INDEX, partition=partition
+ TABLE_NAME,
+ COLUMNS,
+ keyset,
+ index=INDEX,
+ partition=partition,
+ retry=retry,
+ timeout=timeout,
)
else:
result_set = derived.read(
- TABLE_NAME, COLUMNS, keyset, index=INDEX, limit=LIMIT
+ TABLE_NAME,
+ COLUMNS,
+ keyset,
+ index=INDEX,
+ limit=LIMIT,
+ retry=retry,
+ timeout=timeout,
)
self.assertEqual(derived._read_request_count, count + 1)
@@ -474,6 +495,8 @@ def _read_helper(self, multi_use, first=True, count=0, partition=None):
api.streaming_read.assert_called_once_with(
request=expected_request,
metadata=[("google-cloud-resource-prefix", database.name)],
+ retry=retry,
+ timeout=timeout,
)
self.assertSpanAttributes(
@@ -504,6 +527,17 @@ def test_read_w_multi_use_w_first_w_count_gt_0(self):
with self.assertRaises(ValueError):
self._read_helper(multi_use=True, first=True, count=1)
+ def test_read_w_timeout_param(self):
+ self._read_helper(multi_use=True, first=False, timeout=2.0)
+
+ def test_read_w_retry_param(self):
+ self._read_helper(multi_use=True, first=False, retry=Retry(deadline=60))
+
+ def test_read_w_timeout_and_retry_params(self):
+ self._read_helper(
+ multi_use=True, first=False, retry=Retry(deadline=60), timeout=2.0
+ )
+
def test_execute_sql_other_error(self):
database = _Database()
database.spanner_api = self._make_spanner_api()
@@ -540,8 +574,8 @@ def _execute_sql_helper(
partition=None,
sql_count=0,
query_options=None,
- timeout=google.api_core.gapic_v1.method.DEFAULT,
- retry=google.api_core.gapic_v1.method.DEFAULT,
+ timeout=gapic_v1.method.DEFAULT,
+ retry=gapic_v1.method.DEFAULT,
):
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner_v1 import (
@@ -698,7 +732,14 @@ def test_execute_sql_w_query_options(self):
)
def _partition_read_helper(
- self, multi_use, w_txn, size=None, max_partitions=None, index=None
+ self,
+ multi_use,
+ w_txn,
+ size=None,
+ max_partitions=None,
+ index=None,
+ retry=gapic_v1.method.DEFAULT,
+ timeout=gapic_v1.method.DEFAULT,
):
from google.cloud.spanner_v1.keyset import KeySet
from google.cloud.spanner_v1 import Partition
@@ -736,6 +777,8 @@ def _partition_read_helper(
index=index,
partition_size_bytes=size,
max_partitions=max_partitions,
+ retry=retry,
+ timeout=timeout,
)
)
@@ -759,6 +802,8 @@ def _partition_read_helper(
api.partition_read.assert_called_once_with(
request=expected_request,
metadata=[("google-cloud-resource-prefix", database.name)],
+ retry=retry,
+ timeout=timeout,
)
self.assertSpanAttributes(
@@ -809,7 +854,28 @@ def test_partition_read_ok_w_size(self):
def test_partition_read_ok_w_max_partitions(self):
self._partition_read_helper(multi_use=True, w_txn=True, max_partitions=4)
- def _partition_query_helper(self, multi_use, w_txn, size=None, max_partitions=None):
+ def test_partition_read_ok_w_timeout_param(self):
+ self._partition_read_helper(multi_use=True, w_txn=True, timeout=2.0)
+
+ def test_partition_read_ok_w_retry_param(self):
+ self._partition_read_helper(
+ multi_use=True, w_txn=True, retry=Retry(deadline=60)
+ )
+
+ def test_partition_read_ok_w_timeout_and_retry_params(self):
+ self._partition_read_helper(
+ multi_use=True, w_txn=True, retry=Retry(deadline=60), timeout=2.0
+ )
+
+ def _partition_query_helper(
+ self,
+ multi_use,
+ w_txn,
+ size=None,
+ max_partitions=None,
+ retry=gapic_v1.method.DEFAULT,
+ timeout=gapic_v1.method.DEFAULT,
+ ):
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner_v1 import Partition
from google.cloud.spanner_v1 import PartitionOptions
@@ -845,6 +911,8 @@ def _partition_query_helper(self, multi_use, w_txn, size=None, max_partitions=No
PARAM_TYPES,
partition_size_bytes=size,
max_partitions=max_partitions,
+ retry=retry,
+ timeout=timeout,
)
)
@@ -871,6 +939,8 @@ def _partition_query_helper(self, multi_use, w_txn, size=None, max_partitions=No
api.partition_query.assert_called_once_with(
request=expected_request,
metadata=[("google-cloud-resource-prefix", database.name)],
+ retry=retry,
+ timeout=timeout,
)
self.assertSpanAttributes(
@@ -926,6 +996,19 @@ def test_partition_query_ok_w_size(self):
def test_partition_query_ok_w_max_partitions(self):
self._partition_query_helper(multi_use=True, w_txn=True, max_partitions=4)
+ def test_partition_query_ok_w_timeout_param(self):
+ self._partition_query_helper(multi_use=True, w_txn=True, timeout=2.0)
+
+ def test_partition_query_ok_w_retry_param(self):
+ self._partition_query_helper(
+ multi_use=True, w_txn=True, retry=Retry(deadline=30)
+ )
+
+ def test_partition_query_ok_w_timeout_and_retry_params(self):
+ self._partition_query_helper(
+ multi_use=True, w_txn=True, retry=Retry(deadline=60), timeout=2.0
+ )
+
class TestSnapshot(OpenTelemetryBase):
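The snapshot helpers above gain the same `retry`/`timeout` pass-through for read, partition_read, and partition_query. A hedged usage sketch with placeholder names, mirroring what _read_helper asserts against the streaming_read RPC:

    from google.api_core.retry import Retry
    from google.cloud import spanner
    from google.cloud.spanner_v1.keyset import KeySet

    client = spanner.Client()
    database = client.instance("test-instance").database("test-database")

    with database.snapshot(multi_use=True) as snapshot:
        rows = snapshot.read(
            "citizens",
            ["email", "first_name"],
            KeySet(all_=True),
            retry=Retry(deadline=60),  # overrides gapic_v1.method.DEFAULT
            timeout=2.0,
        )
        for row in rows:
            pass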
diff --git a/tests/unit/test_streamed.py b/tests/unit/test_streamed.py
index 63f3bf81fe..7b12f6a94b 100644
--- a/tests/unit/test_streamed.py
+++ b/tests/unit/test_streamed.py
@@ -336,11 +336,11 @@ def test__merge_chunk_array_of_string(self):
FIELDS = [self._make_array_field("name", element_type_code=TypeCode.STRING)]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_list_value([u"A", u"B", u"C"])
- chunk = self._make_list_value([None, u"D", u"E"])
+ chunk = self._make_list_value([u"D", u"E"])
merged = streamed._merge_chunk(chunk)
- expected = self._make_list_value([u"A", u"B", u"C", None, u"D", u"E"])
+ expected = self._make_list_value([u"A", u"B", u"CD", u"E"])
self.assertEqual(merged, expected)
self.assertIsNone(streamed._pending_chunk)
@@ -352,11 +352,25 @@ def test__merge_chunk_array_of_string_with_null(self):
FIELDS = [self._make_array_field("name", element_type_code=TypeCode.STRING)]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_list_value([u"A", u"B", u"C"])
- chunk = self._make_list_value([u"D", u"E"])
+ chunk = self._make_list_value([None, u"D", u"E"])
merged = streamed._merge_chunk(chunk)
- expected = self._make_list_value([u"A", u"B", u"CD", u"E"])
+ expected = self._make_list_value([u"A", u"B", u"C", None, u"D", u"E"])
+ self.assertEqual(merged, expected)
+ self.assertIsNone(streamed._pending_chunk)
+
+ def test__merge_chunk_array_of_string_with_null_pending(self):
+ from google.cloud.spanner_v1 import TypeCode
+
+ iterator = _MockCancellableIterator()
+ streamed = self._make_one(iterator)
+ FIELDS = [self._make_array_field("name", element_type_code=TypeCode.STRING)]
+ streamed._metadata = self._make_result_set_metadata(FIELDS)
+ streamed._pending_chunk = self._make_list_value([u"A", u"B", u"C", None])
+ chunk = self._make_list_value([u"D", u"E"])
+ merged = streamed._merge_chunk(chunk)
+ expected = self._make_list_value([u"A", u"B", u"C", None, u"D", u"E"])
self.assertEqual(merged, expected)
self.assertIsNone(streamed._pending_chunk)
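The three streamed-result tests above (including the swapped expectations) pin down the merge rules for arrays of STRING: adjacent string values across the chunk seam are concatenated, while a None on either side of the seam keeps both lists intact. A pure-Python sketch of just that rule, with an illustrative helper name:

    def merge_string_arrays(pending, chunk):
        """Merge a pending string-array chunk with the next chunk."""
        merged = list(pending)
        if merged and chunk and merged[-1] is not None and chunk[0] is not None:
            merged[-1] = merged[-1] + chunk[0]  # "C" + "D" -> "CD"
            merged.extend(chunk[1:])
        else:
            merged.extend(chunk)  # a None at the seam blocks concatenation
        return merged

    assert merge_string_arrays(["A", "B", "C"], ["D", "E"]) == ["A", "B", "CD", "E"]
    assert merge_string_arrays(["A", "B", "C"], [None, "D", "E"]) == [
        "A", "B", "C", None, "D", "E",
    ]
    assert merge_string_arrays(["A", "B", "C", None], ["D", "E"]) == [
        "A", "B", "C", None, "D", "E",
    ]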
diff --git a/tests/unit/test_transaction.py b/tests/unit/test_transaction.py
index 4dc56bfa06..923a6ec47d 100644
--- a/tests/unit/test_transaction.py
+++ b/tests/unit/test_transaction.py
@@ -17,6 +17,8 @@
from tests._helpers import OpenTelemetryBase, StatusCanonicalCode
from google.cloud.spanner_v1 import Type
from google.cloud.spanner_v1 import TypeCode
+from google.api_core.retry import Retry
+from google.api_core import gapic_v1
TABLE_NAME = "citizens"
COLUMNS = ["email", "first_name", "last_name", "age"]
@@ -410,7 +412,13 @@ def test_execute_update_w_params_wo_param_types(self):
with self.assertRaises(ValueError):
transaction.execute_update(DML_QUERY_WITH_PARAM, PARAMS)
- def _execute_update_helper(self, count=0, query_options=None):
+ def _execute_update_helper(
+ self,
+ count=0,
+ query_options=None,
+ retry=gapic_v1.method.DEFAULT,
+ timeout=gapic_v1.method.DEFAULT,
+ ):
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner_v1 import (
ResultSet,
@@ -439,6 +447,8 @@ def _execute_update_helper(self, count=0, query_options=None):
PARAM_TYPES,
query_mode=MODE,
query_options=query_options,
+ retry=retry,
+ timeout=timeout,
)
self.assertEqual(row_count, 1)
@@ -466,6 +476,8 @@ def _execute_update_helper(self, count=0, query_options=None):
)
api.execute_sql.assert_called_once_with(
request=expected_request,
+ retry=retry,
+ timeout=timeout,
metadata=[("google-cloud-resource-prefix", database.name)],
)
@@ -477,6 +489,15 @@ def test_execute_update_new_transaction(self):
def test_execute_update_w_count(self):
self._execute_update_helper(count=1)
+ def test_execute_update_w_timeout_param(self):
+ self._execute_update_helper(timeout=2.0)
+
+ def test_execute_update_w_retry_param(self):
+ self._execute_update_helper(retry=Retry(deadline=60))
+
+ def test_execute_update_w_timeout_and_retry_params(self):
+ self._execute_update_helper(retry=Retry(deadline=60), timeout=2.0)
+
def test_execute_update_error(self):
database = _Database()
database.spanner_api = self._make_spanner_api()
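Finally, the transaction tests above extend the same `retry`/`timeout` plumbing to execute_update, forwarding both to the execute_sql RPC. A hedged usage sketch; the DML and resource IDs are placeholders:

    from google.api_core.retry import Retry
    from google.cloud import spanner

    client = spanner.Client()
    database = client.instance("test-instance").database("test-database")

    def delete_minors(transaction):
        # retry/timeout are handed through to the underlying execute_sql call.
        return transaction.execute_update(
            "DELETE FROM citizens WHERE age < @min_age",
            params={"min_age": 18},
            param_types={"min_age": spanner.param_types.INT64},
            retry=Retry(deadline=60),
            timeout=2.0,
        )

    row_count = database.run_in_transaction(delete_minors)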