diff --git a/.coveragerc b/.coveragerc
index dd39c8546c..8e75debec9 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -17,6 +17,9 @@
 # Generated by synthtool. DO NOT EDIT!
 [run]
 branch = True
+omit =
+  /tmp/*
+  .nox/*
 
 [report]
 fail_under = 100
@@ -29,7 +32,9 @@ exclude_lines =
     # Ignore abstract methods
     raise NotImplementedError
 omit =
+  /tmp/*
+  .nox/*
   */gapic/*.py
   */proto/*.py
   */core/*.py
-  */site-packages/*.py
\ No newline at end of file
+  */site-packages/*.py
diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index 330f57d782..ce36ab9157 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -1,4 +1,4 @@
-ARG VARIANT="3.8"
+ARG VARIANT="3.12"
 FROM mcr.microsoft.com/devcontainers/python:${VARIANT}
 
 #install nox
diff --git a/.devcontainer/requirements.txt b/.devcontainer/requirements.txt
index a4d4017860..3053bad715 100644
--- a/.devcontainer/requirements.txt
+++ b/.devcontainer/requirements.txt
@@ -4,35 +4,35 @@
 #
 #    pip-compile --generate-hashes requirements.in
 #
-argcomplete==3.1.1 \
-    --hash=sha256:35fa893a88deea85ea7b20d241100e64516d6af6d7b0ae2bed1d263d26f70948 \
-    --hash=sha256:6c4c563f14f01440aaffa3eae13441c5db2357b5eec639abe7c0b15334627dff
+argcomplete==3.2.1 \
+    --hash=sha256:30891d87f3c1abe091f2142613c9d33cac84a5e15404489f033b20399b691fec \
+    --hash=sha256:437f67fb9b058da5a090df505ef9be0297c4883993f3f56cb186ff087778cfb4
     # via nox
-colorlog==6.7.0 \
-    --hash=sha256:0d33ca236784a1ba3ff9c532d4964126d8a2c44f1f0cb1d2b0728196f512f662 \
-    --hash=sha256:bd94bd21c1e13fac7bd3153f4bc3a7dc0eb0974b8bc2fdf1a989e474f6e582e5
+colorlog==6.8.0 \
+    --hash=sha256:4ed23b05a1154294ac99f511fabe8c1d6d4364ec1f7fc989c7fb515ccc29d375 \
+    --hash=sha256:fbb6fdf9d5685f2517f388fb29bb27d54e8654dd31f58bc2a3b217e967a95ca6
     # via nox
-distlib==0.3.7 \
-    --hash=sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057 \
-    --hash=sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8
+distlib==0.3.8 \
+    --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \
+    --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64
     # via virtualenv
-filelock==3.12.2 \
-    --hash=sha256:002740518d8aa59a26b0c76e10fb8c6e15eae825d34b6fdf670333fd7b938d81 \
-    --hash=sha256:cbb791cdea2a72f23da6ac5b5269ab0a0d161e9ef0100e653b69049a7706d1ec
+filelock==3.13.1 \
+    --hash=sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e \
+    --hash=sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c
     # via virtualenv
 nox==2023.4.22 \
     --hash=sha256:0b1adc619c58ab4fa57d6ab2e7823fe47a32e70202f287d78474adcc7bda1891 \
     --hash=sha256:46c0560b0dc609d7d967dc99e22cb463d3c4caf54a5fda735d6c11b5177e3a9f
     # via -r requirements.in
-packaging==23.1 \
-    --hash=sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61 \
-    --hash=sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f
+packaging==23.2 \
+    --hash=sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5 \
+    --hash=sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7
     # via nox
-platformdirs==3.9.1 \
-    --hash=sha256:1b42b450ad933e981d56e59f1b97495428c9bd60698baab9f3eb3d00d5822421 \
-    --hash=sha256:ad8291ae0ae5072f66c16945166cb11c63394c7a3ad1b1bc9828ca3162da8c2f
+platformdirs==4.1.0 \
+    --hash=sha256:11c8f37bcca40db96d8144522d925583bdb7a31f7b0e37e3ed4318400a8e2380 \
+    --hash=sha256:906d548203468492d432bcb294d4bc2fff751bf84971fbb2c10918cc206ee420
     # via virtualenv
-virtualenv==20.24.1 \
-    --hash=sha256:01aacf8decd346cf9a865ae85c0cdc7f64c8caa07ff0d8b1dfc1733d10677442 \
-    --hash=sha256:2ef6a237c31629da6442b0bcaa3999748108c7166318d1f55cc9f8d7294e97bd
+virtualenv==20.25.0 \
+    --hash=sha256:4238949c5ffe6876362d9c0180fc6c3a824a7b12b80604eeb8085f2ed7460de3 \
+    --hash=sha256:bf51c0d9c7dd63ea8e44086fa1e4fb1093a31e963b86959257378aef020e1f1b
     # via nox
diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml
index a3da1b0d4c..773c1dfd21 100644
--- a/.github/.OwlBot.lock.yaml
+++ b/.github/.OwlBot.lock.yaml
@@ -13,5 +13,5 @@
 # limitations under the License.
 docker:
   image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest
-  digest: sha256:3e3800bb100af5d7f9e810d48212b37812c1856d20ffeafb99ebe66461b61fc7
-# created: 2023-08-02T10:53:29.114535628Z
+  digest: sha256:2f155882785883336b4468d5218db737bb1d10c9cea7cb62219ad16fe248c03c
+# created: 2023-11-29T14:54:29.548172703Z
diff --git a/.github/sync-repo-settings.yaml b/.github/sync-repo-settings.yaml
index 6ee95fb8ed..5ee2bca9f9 100644
--- a/.github/sync-repo-settings.yaml
+++ b/.github/sync-repo-settings.yaml
@@ -11,5 +11,5 @@ branchProtectionRules:
     - 'Kokoro system-3.8'
     - 'cla/google'
     - 'Samples - Lint'
-    - 'Samples - Python 3.7'
     - 'Samples - Python 3.8'
+    - 'Samples - Python 3.12'
diff --git a/.github/workflows/integration-tests-against-emulator.yaml b/.github/workflows/integration-tests-against-emulator.yaml
index 8f074c1555..3a4390219d 100644
--- a/.github/workflows/integration-tests-against-emulator.yaml
+++ b/.github/workflows/integration-tests-against-emulator.yaml
@@ -17,9 +17,9 @@ jobs:
 
     steps:
       - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Setup Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: 3.8
       - name: Install nox
diff --git a/.gitignore b/.gitignore
index b4243ced74..d083ea1ddc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -50,6 +50,7 @@ docs.metadata
 
 # Virtual environment
 env/
+venv/
 
 # Test logs
 coverage.xml
diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt
index 029bd342de..e5c1ffca94 100644
--- a/.kokoro/requirements.txt
+++ b/.kokoro/requirements.txt
@@ -4,91 +4,75 @@
 #
 #    pip-compile --allow-unsafe --generate-hashes requirements.in
 #
-argcomplete==2.0.0 \
-    --hash=sha256:6372ad78c89d662035101418ae253668445b391755cfe94ea52f1b9d22425b20 \
-    --hash=sha256:cffa11ea77999bb0dd27bb25ff6dc142a6796142f68d45b1a26b11f58724561e
+argcomplete==3.1.4 \
+    --hash=sha256:72558ba729e4c468572609817226fb0a6e7e9a0a7d477b882be168c0b4a62b94 \
+    --hash=sha256:fbe56f8cda08aa9a04b307d8482ea703e96a6a801611acb4be9bf3942017989f
     # via nox
-attrs==22.1.0 \
-    --hash=sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6 \
-    --hash=sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c
+attrs==23.1.0 \
+    --hash=sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04 \
+    --hash=sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015
     # via gcp-releasetool
-bleach==5.0.1 \
-    --hash=sha256:085f7f33c15bd408dd9b17a4ad77c577db66d76203e5984b1bd59baeee948b2a \
-    --hash=sha256:0d03255c47eb9bd2f26aa9bb7f2107732e7e8fe195ca2f64709fcf3b0a4a085c
-    # via readme-renderer
-cachetools==5.2.0 \
-    --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \
-    --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db
+cachetools==5.3.2 \
+    --hash=sha256:086ee420196f7b2ab9ca2db2520aca326318b68fe5ba8bc4d49cca91add450f2 \
--hash=sha256:861f35a13a451f94e301ce2bec7cac63e881232ccce7ed67fab9b5df4d3beaa1 # via google-auth certifi==2023.7.22 \ --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \ --hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9 # via requests -cffi==1.15.1 \ - --hash=sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5 \ - --hash=sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef \ - --hash=sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104 \ - --hash=sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426 \ - --hash=sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405 \ - --hash=sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375 \ - --hash=sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a \ - --hash=sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e \ - --hash=sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc \ - --hash=sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf \ - --hash=sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185 \ - --hash=sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497 \ - --hash=sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3 \ - --hash=sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35 \ - --hash=sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c \ - --hash=sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83 \ - --hash=sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21 \ - --hash=sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca \ - --hash=sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984 \ - --hash=sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac \ - --hash=sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd \ - --hash=sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee \ - --hash=sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a \ - --hash=sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2 \ - --hash=sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192 \ - --hash=sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7 \ - --hash=sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585 \ - --hash=sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f \ - --hash=sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e \ - --hash=sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27 \ - --hash=sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b \ - --hash=sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e \ - --hash=sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e \ - --hash=sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d \ - --hash=sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c \ - --hash=sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415 \ - --hash=sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82 \ - --hash=sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02 \ - --hash=sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314 \ - 
--hash=sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325 \ - --hash=sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c \ - --hash=sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3 \ - --hash=sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914 \ - --hash=sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045 \ - --hash=sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d \ - --hash=sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9 \ - --hash=sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5 \ - --hash=sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2 \ - --hash=sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c \ - --hash=sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3 \ - --hash=sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2 \ - --hash=sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8 \ - --hash=sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d \ - --hash=sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d \ - --hash=sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9 \ - --hash=sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162 \ - --hash=sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76 \ - --hash=sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4 \ - --hash=sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e \ - --hash=sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9 \ - --hash=sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6 \ - --hash=sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b \ - --hash=sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01 \ - --hash=sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0 +cffi==1.16.0 \ + --hash=sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc \ + --hash=sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a \ + --hash=sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417 \ + --hash=sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab \ + --hash=sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520 \ + --hash=sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36 \ + --hash=sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743 \ + --hash=sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8 \ + --hash=sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed \ + --hash=sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684 \ + --hash=sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56 \ + --hash=sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324 \ + --hash=sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d \ + --hash=sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235 \ + --hash=sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e \ + --hash=sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088 \ + --hash=sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000 \ + 
--hash=sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7 \ + --hash=sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e \ + --hash=sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673 \ + --hash=sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c \ + --hash=sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe \ + --hash=sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2 \ + --hash=sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098 \ + --hash=sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8 \ + --hash=sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a \ + --hash=sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0 \ + --hash=sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b \ + --hash=sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896 \ + --hash=sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e \ + --hash=sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9 \ + --hash=sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2 \ + --hash=sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b \ + --hash=sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6 \ + --hash=sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404 \ + --hash=sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f \ + --hash=sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0 \ + --hash=sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4 \ + --hash=sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc \ + --hash=sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936 \ + --hash=sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba \ + --hash=sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872 \ + --hash=sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb \ + --hash=sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614 \ + --hash=sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1 \ + --hash=sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d \ + --hash=sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969 \ + --hash=sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b \ + --hash=sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4 \ + --hash=sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627 \ + --hash=sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956 \ + --hash=sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357 # via cryptography charset-normalizer==2.1.1 \ --hash=sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845 \ @@ -109,78 +93,74 @@ colorlog==6.7.0 \ # via # gcp-docuploader # nox -commonmark==0.9.1 \ - --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \ - --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9 - # via rich -cryptography==41.0.3 \ - --hash=sha256:0d09fb5356f975974dbcb595ad2d178305e5050656affb7890a1583f5e02a306 \ - --hash=sha256:23c2d778cf829f7d0ae180600b17e9fceea3c2ef8b31a99e3c694cbbf3a24b84 \ - 
--hash=sha256:3fb248989b6363906827284cd20cca63bb1a757e0a2864d4c1682a985e3dca47 \ - --hash=sha256:41d7aa7cdfded09b3d73a47f429c298e80796c8e825ddfadc84c8a7f12df212d \ - --hash=sha256:42cb413e01a5d36da9929baa9d70ca90d90b969269e5a12d39c1e0d475010116 \ - --hash=sha256:4c2f0d35703d61002a2bbdcf15548ebb701cfdd83cdc12471d2bae80878a4207 \ - --hash=sha256:4fd871184321100fb400d759ad0cddddf284c4b696568204d281c902fc7b0d81 \ - --hash=sha256:5259cb659aa43005eb55a0e4ff2c825ca111a0da1814202c64d28a985d33b087 \ - --hash=sha256:57a51b89f954f216a81c9d057bf1a24e2f36e764a1ca9a501a6964eb4a6800dd \ - --hash=sha256:652627a055cb52a84f8c448185922241dd5217443ca194d5739b44612c5e6507 \ - --hash=sha256:67e120e9a577c64fe1f611e53b30b3e69744e5910ff3b6e97e935aeb96005858 \ - --hash=sha256:6af1c6387c531cd364b72c28daa29232162010d952ceb7e5ca8e2827526aceae \ - --hash=sha256:6d192741113ef5e30d89dcb5b956ef4e1578f304708701b8b73d38e3e1461f34 \ - --hash=sha256:7efe8041897fe7a50863e51b77789b657a133c75c3b094e51b5e4b5cec7bf906 \ - --hash=sha256:84537453d57f55a50a5b6835622ee405816999a7113267739a1b4581f83535bd \ - --hash=sha256:8f09daa483aedea50d249ef98ed500569841d6498aa9c9f4b0531b9964658922 \ - --hash=sha256:95dd7f261bb76948b52a5330ba5202b91a26fbac13ad0e9fc8a3ac04752058c7 \ - --hash=sha256:a74fbcdb2a0d46fe00504f571a2a540532f4c188e6ccf26f1f178480117b33c4 \ - --hash=sha256:a983e441a00a9d57a4d7c91b3116a37ae602907a7618b882c8013b5762e80574 \ - --hash=sha256:ab8de0d091acbf778f74286f4989cf3d1528336af1b59f3e5d2ebca8b5fe49e1 \ - --hash=sha256:aeb57c421b34af8f9fe830e1955bf493a86a7996cc1338fe41b30047d16e962c \ - --hash=sha256:ce785cf81a7bdade534297ef9e490ddff800d956625020ab2ec2780a556c313e \ - --hash=sha256:d0d651aa754ef58d75cec6edfbd21259d93810b73f6ec246436a21b7841908de +cryptography==41.0.6 \ + --hash=sha256:068bc551698c234742c40049e46840843f3d98ad7ce265fd2bd4ec0d11306596 \ + --hash=sha256:0f27acb55a4e77b9be8d550d762b0513ef3fc658cd3eb15110ebbcbd626db12c \ + --hash=sha256:2132d5865eea673fe6712c2ed5fb4fa49dba10768bb4cc798345748380ee3660 \ + --hash=sha256:3288acccef021e3c3c10d58933f44e8602cf04dba96d9796d70d537bb2f4bbc4 \ + --hash=sha256:35f3f288e83c3f6f10752467c48919a7a94b7d88cc00b0668372a0d2ad4f8ead \ + --hash=sha256:398ae1fc711b5eb78e977daa3cbf47cec20f2c08c5da129b7a296055fbb22aed \ + --hash=sha256:422e3e31d63743855e43e5a6fcc8b4acab860f560f9321b0ee6269cc7ed70cc3 \ + --hash=sha256:48783b7e2bef51224020efb61b42704207dde583d7e371ef8fc2a5fb6c0aabc7 \ + --hash=sha256:4d03186af98b1c01a4eda396b137f29e4e3fb0173e30f885e27acec8823c1b09 \ + --hash=sha256:5daeb18e7886a358064a68dbcaf441c036cbdb7da52ae744e7b9207b04d3908c \ + --hash=sha256:60e746b11b937911dc70d164060d28d273e31853bb359e2b2033c9e93e6f3c43 \ + --hash=sha256:742ae5e9a2310e9dade7932f9576606836ed174da3c7d26bc3d3ab4bd49b9f65 \ + --hash=sha256:7e00fb556bda398b99b0da289ce7053639d33b572847181d6483ad89835115f6 \ + --hash=sha256:85abd057699b98fce40b41737afb234fef05c67e116f6f3650782c10862c43da \ + --hash=sha256:8efb2af8d4ba9dbc9c9dd8f04d19a7abb5b49eab1f3694e7b5a16a5fc2856f5c \ + --hash=sha256:ae236bb8760c1e55b7a39b6d4d32d2279bc6c7c8500b7d5a13b6fb9fc97be35b \ + --hash=sha256:afda76d84b053923c27ede5edc1ed7d53e3c9f475ebaf63c68e69f1403c405a8 \ + --hash=sha256:b27a7fd4229abef715e064269d98a7e2909ebf92eb6912a9603c7e14c181928c \ + --hash=sha256:b648fe2a45e426aaee684ddca2632f62ec4613ef362f4d681a9a6283d10e079d \ + --hash=sha256:c5a550dc7a3b50b116323e3d376241829fd326ac47bc195e04eb33a8170902a9 \ + --hash=sha256:da46e2b5df770070412c46f87bac0849b8d685c5f2679771de277a422c7d0b86 \ + 
--hash=sha256:f39812f70fc5c71a15aa3c97b2bbe213c3f2a460b79bd21c40d033bb34a9bf36 \ + --hash=sha256:ff369dd19e8fe0528b02e8df9f2aeb2479f89b1270d90f96a63500afe9af5cae # via # gcp-releasetool # secretstorage -distlib==0.3.6 \ - --hash=sha256:14bad2d9b04d3a36127ac97f30b12a19268f211063d8f8ee4f47108896e11b46 \ - --hash=sha256:f35c4b692542ca110de7ef0bea44d73981caeb34ca0b9b6b2e6d7790dda8f80e +distlib==0.3.7 \ + --hash=sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057 \ + --hash=sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8 # via virtualenv -docutils==0.19 \ - --hash=sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6 \ - --hash=sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc +docutils==0.20.1 \ + --hash=sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6 \ + --hash=sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b # via readme-renderer -filelock==3.8.0 \ - --hash=sha256:55447caa666f2198c5b6b13a26d2084d26fa5b115c00d065664b2124680c4edc \ - --hash=sha256:617eb4e5eedc82fc5f47b6d61e4d11cb837c56cb4544e39081099fa17ad109d4 +filelock==3.13.1 \ + --hash=sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e \ + --hash=sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c # via virtualenv -gcp-docuploader==0.6.4 \ - --hash=sha256:01486419e24633af78fd0167db74a2763974765ee8078ca6eb6964d0ebd388af \ - --hash=sha256:70861190c123d907b3b067da896265ead2eeb9263969d6955c9e0bb091b5ccbf +gcp-docuploader==0.6.5 \ + --hash=sha256:30221d4ac3e5a2b9c69aa52fdbef68cc3f27d0e6d0d90e220fc024584b8d2318 \ + --hash=sha256:b7458ef93f605b9d46a4bf3a8dc1755dad1f31d030c8679edf304e343b347eea # via -r requirements.in -gcp-releasetool==1.10.5 \ - --hash=sha256:174b7b102d704b254f2a26a3eda2c684fd3543320ec239baf771542a2e58e109 \ - --hash=sha256:e29d29927fe2ca493105a82958c6873bb2b90d503acac56be2c229e74de0eec9 +gcp-releasetool==1.16.0 \ + --hash=sha256:27bf19d2e87aaa884096ff941aa3c592c482be3d6a2bfe6f06afafa6af2353e3 \ + --hash=sha256:a316b197a543fd036209d0caba7a8eb4d236d8e65381c80cbc6d7efaa7606d63 # via -r requirements.in -google-api-core==2.10.2 \ - --hash=sha256:10c06f7739fe57781f87523375e8e1a3a4674bf6392cd6131a3222182b971320 \ - --hash=sha256:34f24bd1d5f72a8c4519773d99ca6bf080a6c4e041b4e9f024fe230191dda62e +google-api-core==2.12.0 \ + --hash=sha256:c22e01b1e3c4dcd90998494879612c38d0a3411d1f7b679eb89e2abe3ce1f553 \ + --hash=sha256:ec6054f7d64ad13b41e43d96f735acbd763b0f3b695dabaa2d579673f6a6e160 # via # google-cloud-core # google-cloud-storage -google-auth==2.14.1 \ - --hash=sha256:ccaa901f31ad5cbb562615eb8b664b3dd0bf5404a67618e642307f00613eda4d \ - --hash=sha256:f5d8701633bebc12e0deea4df8abd8aff31c28b355360597f7f2ee60f2e4d016 +google-auth==2.23.4 \ + --hash=sha256:79905d6b1652187def79d491d6e23d0cbb3a21d3c7ba0dbaa9c8a01906b13ff3 \ + --hash=sha256:d4bbc92fe4b8bfd2f3e8d88e5ba7085935da208ee38a134fc280e7ce682a05f2 # via # gcp-releasetool # google-api-core # google-cloud-core # google-cloud-storage -google-cloud-core==2.3.2 \ - --hash=sha256:8417acf6466be2fa85123441696c4badda48db314c607cf1e5d543fa8bdc22fe \ - --hash=sha256:b9529ee7047fd8d4bf4a2182de619154240df17fbe60ead399078c1ae152af9a +google-cloud-core==2.3.3 \ + --hash=sha256:37b80273c8d7eee1ae816b3a20ae43585ea50506cb0e60f3cf5be5f87f1373cb \ + --hash=sha256:fbd11cad3e98a7e5b0343dc07cb1039a5ffd7a5bb96e1f1e27cee4bda4a90863 # via google-cloud-storage -google-cloud-storage==2.6.0 \ - 
--hash=sha256:104ca28ae61243b637f2f01455cc8a05e8f15a2a18ced96cb587241cdd3820f5 \ - --hash=sha256:4ad0415ff61abdd8bb2ae81c1f8f7ec7d91a1011613f2db87c614c550f97bfe9 +google-cloud-storage==2.13.0 \ + --hash=sha256:ab0bf2e1780a1b74cf17fccb13788070b729f50c252f0c94ada2aae0ca95437d \ + --hash=sha256:f62dc4c7b6cd4360d072e3deb28035fbdad491ac3d9b0b1815a12daea10f37c7 # via gcp-docuploader google-crc32c==1.5.0 \ --hash=sha256:024894d9d3cfbc5943f8f230e23950cd4906b2fe004c72e29b209420a1e6b05a \ @@ -251,29 +231,31 @@ google-crc32c==1.5.0 \ --hash=sha256:f583edb943cf2e09c60441b910d6a20b4d9d626c75a36c8fcac01a6c96c01183 \ --hash=sha256:fd8536e902db7e365f49e7d9029283403974ccf29b13fc7028b97e2295b33556 \ --hash=sha256:fe70e325aa68fa4b5edf7d1a4b6f691eb04bbccac0ace68e34820d283b5f80d4 - # via google-resumable-media -google-resumable-media==2.4.0 \ - --hash=sha256:2aa004c16d295c8f6c33b2b4788ba59d366677c0a25ae7382436cb30f776deaa \ - --hash=sha256:8d5518502f92b9ecc84ac46779bd4f09694ecb3ba38a3e7ca737a86d15cbca1f + # via + # google-cloud-storage + # google-resumable-media +google-resumable-media==2.6.0 \ + --hash=sha256:972852f6c65f933e15a4a210c2b96930763b47197cdf4aa5f5bea435efb626e7 \ + --hash=sha256:fc03d344381970f79eebb632a3c18bb1828593a2dc5572b5f90115ef7d11e81b # via google-cloud-storage -googleapis-common-protos==1.57.0 \ - --hash=sha256:27a849d6205838fb6cc3c1c21cb9800707a661bb21c6ce7fb13e99eb1f8a0c46 \ - --hash=sha256:a9f4a1d7f6d9809657b7f1316a1aa527f6664891531bcfcc13b6696e685f443c +googleapis-common-protos==1.61.0 \ + --hash=sha256:22f1915393bb3245343f6efe87f6fe868532efc12aa26b391b15132e1279f1c0 \ + --hash=sha256:8a64866a97f6304a7179873a465d6eee97b7a24ec6cfd78e0f575e96b821240b # via google-api-core idna==3.4 \ --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2 # via requests -importlib-metadata==5.0.0 \ - --hash=sha256:da31db32b304314d044d3c12c79bd59e307889b287ad12ff387b3500835fc2ab \ - --hash=sha256:ddb0e35065e8938f867ed4928d0ae5bf2a53b7773871bfe6bcc7e4fcdc7dea43 +importlib-metadata==6.8.0 \ + --hash=sha256:3ebb78df84a805d7698245025b975d9d67053cd94c79245ba4b3eb694abe68bb \ + --hash=sha256:dbace7892d8c0c4ac1ad096662232f831d4e64f4c4545bd53016a3e9d4654743 # via # -r requirements.in # keyring # twine -jaraco-classes==3.2.3 \ - --hash=sha256:2353de3288bc6b82120752201c6b1c1a14b058267fa424ed5ce5984e3b922158 \ - --hash=sha256:89559fa5c1d3c34eff6f631ad80bb21f378dbcbb35dd161fd2c6b93f5be2f98a +jaraco-classes==3.3.0 \ + --hash=sha256:10afa92b6743f25c0cf5f37c6bb6e18e2c5bb84a16527ccfc0040ea377e7aaeb \ + --hash=sha256:c063dd08e89217cee02c8d5e5ec560f2c8ce6cdc2fcdc2e68f7b2e5547ed3621 # via keyring jeepney==0.8.0 \ --hash=sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806 \ @@ -285,75 +267,121 @@ jinja2==3.1.2 \ --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \ --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 # via gcp-releasetool -keyring==23.11.0 \ - --hash=sha256:3dd30011d555f1345dec2c262f0153f2f0ca6bca041fb1dc4588349bb4c0ac1e \ - --hash=sha256:ad192263e2cdd5f12875dedc2da13534359a7e760e77f8d04b50968a821c2361 +keyring==24.2.0 \ + --hash=sha256:4901caaf597bfd3bbd78c9a0c7c4c29fcd8310dab2cffefe749e916b6527acd6 \ + --hash=sha256:ca0746a19ec421219f4d713f848fa297a661a8a8c1504867e55bfb5e09091509 # via # gcp-releasetool # twine -markupsafe==2.1.1 \ - --hash=sha256:0212a68688482dc52b2d45013df70d169f542b7394fc744c02a57374a4207003 \ - 
--hash=sha256:089cf3dbf0cd6c100f02945abeb18484bd1ee57a079aefd52cffd17fba910b88 \ - --hash=sha256:10c1bfff05d95783da83491be968e8fe789263689c02724e0c691933c52994f5 \ - --hash=sha256:33b74d289bd2f5e527beadcaa3f401e0df0a89927c1559c8566c066fa4248ab7 \ - --hash=sha256:3799351e2336dc91ea70b034983ee71cf2f9533cdff7c14c90ea126bfd95d65a \ - --hash=sha256:3ce11ee3f23f79dbd06fb3d63e2f6af7b12db1d46932fe7bd8afa259a5996603 \ - --hash=sha256:421be9fbf0ffe9ffd7a378aafebbf6f4602d564d34be190fc19a193232fd12b1 \ - --hash=sha256:43093fb83d8343aac0b1baa75516da6092f58f41200907ef92448ecab8825135 \ - --hash=sha256:46d00d6cfecdde84d40e572d63735ef81423ad31184100411e6e3388d405e247 \ - --hash=sha256:4a33dea2b688b3190ee12bd7cfa29d39c9ed176bda40bfa11099a3ce5d3a7ac6 \ - --hash=sha256:4b9fe39a2ccc108a4accc2676e77da025ce383c108593d65cc909add5c3bd601 \ - --hash=sha256:56442863ed2b06d19c37f94d999035e15ee982988920e12a5b4ba29b62ad1f77 \ - --hash=sha256:671cd1187ed5e62818414afe79ed29da836dde67166a9fac6d435873c44fdd02 \ - --hash=sha256:694deca8d702d5db21ec83983ce0bb4b26a578e71fbdbd4fdcd387daa90e4d5e \ - --hash=sha256:6a074d34ee7a5ce3effbc526b7083ec9731bb3cbf921bbe1d3005d4d2bdb3a63 \ - --hash=sha256:6d0072fea50feec76a4c418096652f2c3238eaa014b2f94aeb1d56a66b41403f \ - --hash=sha256:6fbf47b5d3728c6aea2abb0589b5d30459e369baa772e0f37a0320185e87c980 \ - --hash=sha256:7f91197cc9e48f989d12e4e6fbc46495c446636dfc81b9ccf50bb0ec74b91d4b \ - --hash=sha256:86b1f75c4e7c2ac2ccdaec2b9022845dbb81880ca318bb7a0a01fbf7813e3812 \ - --hash=sha256:8dc1c72a69aa7e082593c4a203dcf94ddb74bb5c8a731e4e1eb68d031e8498ff \ - --hash=sha256:8e3dcf21f367459434c18e71b2a9532d96547aef8a871872a5bd69a715c15f96 \ - --hash=sha256:8e576a51ad59e4bfaac456023a78f6b5e6e7651dcd383bcc3e18d06f9b55d6d1 \ - --hash=sha256:96e37a3dc86e80bf81758c152fe66dbf60ed5eca3d26305edf01892257049925 \ - --hash=sha256:97a68e6ada378df82bc9f16b800ab77cbf4b2fada0081794318520138c088e4a \ - --hash=sha256:99a2a507ed3ac881b975a2976d59f38c19386d128e7a9a18b7df6fff1fd4c1d6 \ - --hash=sha256:a49907dd8420c5685cfa064a1335b6754b74541bbb3706c259c02ed65b644b3e \ - --hash=sha256:b09bf97215625a311f669476f44b8b318b075847b49316d3e28c08e41a7a573f \ - --hash=sha256:b7bd98b796e2b6553da7225aeb61f447f80a1ca64f41d83612e6139ca5213aa4 \ - --hash=sha256:b87db4360013327109564f0e591bd2a3b318547bcef31b468a92ee504d07ae4f \ - --hash=sha256:bcb3ed405ed3222f9904899563d6fc492ff75cce56cba05e32eff40e6acbeaa3 \ - --hash=sha256:d4306c36ca495956b6d568d276ac11fdd9c30a36f1b6eb928070dc5360b22e1c \ - --hash=sha256:d5ee4f386140395a2c818d149221149c54849dfcfcb9f1debfe07a8b8bd63f9a \ - --hash=sha256:dda30ba7e87fbbb7eab1ec9f58678558fd9a6b8b853530e176eabd064da81417 \ - --hash=sha256:e04e26803c9c3851c931eac40c695602c6295b8d432cbe78609649ad9bd2da8a \ - --hash=sha256:e1c0b87e09fa55a220f058d1d49d3fb8df88fbfab58558f1198e08c1e1de842a \ - --hash=sha256:e72591e9ecd94d7feb70c1cbd7be7b3ebea3f548870aa91e2732960fa4d57a37 \ - --hash=sha256:e8c843bbcda3a2f1e3c2ab25913c80a3c5376cd00c6e8c4a86a89a28c8dc5452 \ - --hash=sha256:efc1913fd2ca4f334418481c7e595c00aad186563bbc1ec76067848c7ca0a933 \ - --hash=sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a \ - --hash=sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7 +markdown-it-py==3.0.0 \ + --hash=sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1 \ + --hash=sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb + # via rich +markupsafe==2.1.3 \ + --hash=sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e \ + 
--hash=sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e \ + --hash=sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431 \ + --hash=sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686 \ + --hash=sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c \ + --hash=sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559 \ + --hash=sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc \ + --hash=sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb \ + --hash=sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939 \ + --hash=sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c \ + --hash=sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0 \ + --hash=sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4 \ + --hash=sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9 \ + --hash=sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575 \ + --hash=sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba \ + --hash=sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d \ + --hash=sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd \ + --hash=sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3 \ + --hash=sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00 \ + --hash=sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155 \ + --hash=sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac \ + --hash=sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52 \ + --hash=sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f \ + --hash=sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8 \ + --hash=sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b \ + --hash=sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007 \ + --hash=sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24 \ + --hash=sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea \ + --hash=sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198 \ + --hash=sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0 \ + --hash=sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee \ + --hash=sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be \ + --hash=sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2 \ + --hash=sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1 \ + --hash=sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707 \ + --hash=sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6 \ + --hash=sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c \ + --hash=sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58 \ + --hash=sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823 \ + --hash=sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779 \ + --hash=sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636 \ + --hash=sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c \ + --hash=sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad \ + 
--hash=sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee \ + --hash=sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc \ + --hash=sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2 \ + --hash=sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48 \ + --hash=sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7 \ + --hash=sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e \ + --hash=sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b \ + --hash=sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa \ + --hash=sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5 \ + --hash=sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e \ + --hash=sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb \ + --hash=sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9 \ + --hash=sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57 \ + --hash=sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc \ + --hash=sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc \ + --hash=sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2 \ + --hash=sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11 # via jinja2 -more-itertools==9.0.0 \ - --hash=sha256:250e83d7e81d0c87ca6bd942e6aeab8cc9daa6096d12c5308f3f92fa5e5c1f41 \ - --hash=sha256:5a6257e40878ef0520b1803990e3e22303a41b5714006c32a3fd8304b26ea1ab +mdurl==0.1.2 \ + --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ + --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba + # via markdown-it-py +more-itertools==10.1.0 \ + --hash=sha256:626c369fa0eb37bac0291bce8259b332fd59ac792fa5497b59837309cd5b114a \ + --hash=sha256:64e0735fcfdc6f3464ea133afe8ea4483b1c5fe3a3d69852e6503b43a0b222e6 # via jaraco-classes -nox==2022.11.21 \ - --hash=sha256:0e41a990e290e274cb205a976c4c97ee3c5234441a8132c8c3fd9ea3c22149eb \ - --hash=sha256:e21c31de0711d1274ca585a2c5fde36b1aa962005ba8e9322bf5eeed16dcd684 +nh3==0.2.14 \ + --hash=sha256:116c9515937f94f0057ef50ebcbcc10600860065953ba56f14473ff706371873 \ + --hash=sha256:18415df36db9b001f71a42a3a5395db79cf23d556996090d293764436e98e8ad \ + --hash=sha256:203cac86e313cf6486704d0ec620a992c8bc164c86d3a4fd3d761dd552d839b5 \ + --hash=sha256:2b0be5c792bd43d0abef8ca39dd8acb3c0611052ce466d0401d51ea0d9aa7525 \ + --hash=sha256:377aaf6a9e7c63962f367158d808c6a1344e2b4f83d071c43fbd631b75c4f0b2 \ + --hash=sha256:525846c56c2bcd376f5eaee76063ebf33cf1e620c1498b2a40107f60cfc6054e \ + --hash=sha256:5529a3bf99402c34056576d80ae5547123f1078da76aa99e8ed79e44fa67282d \ + --hash=sha256:7771d43222b639a4cd9e341f870cee336b9d886de1ad9bec8dddab22fe1de450 \ + --hash=sha256:88c753efbcdfc2644a5012938c6b9753f1c64a5723a67f0301ca43e7b85dcf0e \ + --hash=sha256:93a943cfd3e33bd03f77b97baa11990148687877b74193bf777956b67054dcc6 \ + --hash=sha256:9be2f68fb9a40d8440cbf34cbf40758aa7f6093160bfc7fb018cce8e424f0c3a \ + --hash=sha256:a0c509894fd4dccdff557068e5074999ae3b75f4c5a2d6fb5415e782e25679c4 \ + --hash=sha256:ac8056e937f264995a82bf0053ca898a1cb1c9efc7cd68fa07fe0060734df7e4 \ + --hash=sha256:aed56a86daa43966dd790ba86d4b810b219f75b4bb737461b6886ce2bde38fd6 \ + --hash=sha256:e8986f1dd3221d1e741fda0a12eaa4a273f1d80a35e31a1ffe579e7c621d069e \ + --hash=sha256:f99212a81c62b5f22f9e7c3e347aa00491114a5647e1f13bbebd79c3e5f08d75 + # via 
readme-renderer +nox==2023.4.22 \ + --hash=sha256:0b1adc619c58ab4fa57d6ab2e7823fe47a32e70202f287d78474adcc7bda1891 \ + --hash=sha256:46c0560b0dc609d7d967dc99e22cb463d3c4caf54a5fda735d6c11b5177e3a9f # via -r requirements.in -packaging==21.3 \ - --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \ - --hash=sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522 +packaging==23.2 \ + --hash=sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5 \ + --hash=sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7 # via # gcp-releasetool # nox -pkginfo==1.8.3 \ - --hash=sha256:848865108ec99d4901b2f7e84058b6e7660aae8ae10164e015a6dcf5b242a594 \ - --hash=sha256:a84da4318dd86f870a9447a8c98340aa06216bfc6f2b7bdc4b8766984ae1867c +pkginfo==1.9.6 \ + --hash=sha256:4b7a555a6d5a22169fcc9cf7bfd78d296b0361adad412a346c1226849af5e546 \ + --hash=sha256:8fd5896e8718a4372f0ea9cc9d96f6417c9b986e23a4d116dda26b62cc29d046 # via twine -platformdirs==2.5.4 \ - --hash=sha256:1006647646d80f16130f052404c6b901e80ee4ed6bef6792e1f238a8969106f7 \ - --hash=sha256:af0276409f9a02373d540bf8480021a048711d572745aef4b7842dad245eba10 +platformdirs==3.11.0 \ + --hash=sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3 \ + --hash=sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e # via virtualenv protobuf==3.20.3 \ --hash=sha256:03038ac1cfbc41aa21f6afcbcd357281d7521b4157926f30ebecc8d4ea59dcb7 \ @@ -382,34 +410,31 @@ protobuf==3.20.3 \ # gcp-docuploader # gcp-releasetool # google-api-core -pyasn1==0.4.8 \ - --hash=sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d \ - --hash=sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba + # googleapis-common-protos +pyasn1==0.5.0 \ + --hash=sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57 \ + --hash=sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde # via # pyasn1-modules # rsa -pyasn1-modules==0.2.8 \ - --hash=sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e \ - --hash=sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74 +pyasn1-modules==0.3.0 \ + --hash=sha256:5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c \ + --hash=sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d # via google-auth pycparser==2.21 \ --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \ --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206 # via cffi -pygments==2.15.0 \ - --hash=sha256:77a3299119af881904cd5ecd1ac6a66214b6e9bed1f2db16993b54adede64094 \ - --hash=sha256:f7e36cffc4c517fbc252861b9a6e4644ca0e5abadf9a113c72d1358ad09b9500 +pygments==2.16.1 \ + --hash=sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692 \ + --hash=sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29 # via # readme-renderer # rich -pyjwt==2.6.0 \ - --hash=sha256:69285c7e31fc44f68a1feb309e948e0df53259d579295e6cfe2b1792329f05fd \ - --hash=sha256:d83c3d892a77bbb74d3e1a2cfa90afaadb60945205d1095d9221f04466f64c14 +pyjwt==2.8.0 \ + --hash=sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de \ + --hash=sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320 # via gcp-releasetool -pyparsing==3.0.9 \ - --hash=sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb \ - --hash=sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc - 
# via packaging pyperclip==1.8.2 \ --hash=sha256:105254a8b04934f0bc84e9c24eb360a591aaf6535c9def5f29d92af107a9bf57 # via gcp-releasetool @@ -417,9 +442,9 @@ python-dateutil==2.8.2 \ --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 # via gcp-releasetool -readme-renderer==37.3 \ - --hash=sha256:cd653186dfc73055656f090f227f5cb22a046d7f71a841dfa305f55c9a513273 \ - --hash=sha256:f67a16caedfa71eef48a31b39708637a6f4664c4394801a7b0d6432d13907343 +readme-renderer==42.0 \ + --hash=sha256:13d039515c1f24de668e2c93f2e877b9dbe6c6c32328b90a40a49d8b2b85f36d \ + --hash=sha256:2d55489f83be4992fe4454939d1a051c33edbab778e82761d060c9fc6b308cd1 # via twine requests==2.31.0 \ --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \ @@ -430,17 +455,17 @@ requests==2.31.0 \ # google-cloud-storage # requests-toolbelt # twine -requests-toolbelt==0.10.1 \ - --hash=sha256:18565aa58116d9951ac39baa288d3adb5b3ff975c4f25eee78555d89e8f247f7 \ - --hash=sha256:62e09f7ff5ccbda92772a29f394a49c3ad6cb181d568b1337626b2abb628a63d +requests-toolbelt==1.0.0 \ + --hash=sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6 \ + --hash=sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06 # via twine rfc3986==2.0.0 \ --hash=sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd \ --hash=sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c # via twine -rich==12.6.0 \ - --hash=sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e \ - --hash=sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0 +rich==13.6.0 \ + --hash=sha256:2b38e2fe9ca72c9a00170a1a2d20c63c790d0e10ef1fe35eba76e1e7b1d7d245 \ + --hash=sha256:5c14d22737e6d5084ef4771b62d5d4363165b403455a30a1c8ca39dc7b644bef # via twine rsa==4.9 \ --hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \ @@ -454,43 +479,37 @@ six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 # via - # bleach # gcp-docuploader - # google-auth # python-dateutil -twine==4.0.1 \ - --hash=sha256:42026c18e394eac3e06693ee52010baa5313e4811d5a11050e7d48436cf41b9e \ - --hash=sha256:96b1cf12f7ae611a4a40b6ae8e9570215daff0611828f5fe1f37a16255ab24a0 +twine==4.0.2 \ + --hash=sha256:929bc3c280033347a00f847236564d1c52a3e61b1ac2516c97c48f3ceab756d8 \ + --hash=sha256:9e102ef5fdd5a20661eb88fad46338806c3bd32cf1db729603fe3697b1bc83c8 # via -r requirements.in -typing-extensions==4.4.0 \ - --hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \ - --hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e +typing-extensions==4.8.0 \ + --hash=sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0 \ + --hash=sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef # via -r requirements.in -urllib3==1.26.12 \ - --hash=sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e \ - --hash=sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997 +urllib3==2.0.7 \ + --hash=sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84 \ + --hash=sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e # via # requests # twine -virtualenv==20.16.7 \ - --hash=sha256:8691e3ff9387f743e00f6bb20f70121f5e4f596cae754531f2b3b3a1b1ac696e \ - 
--hash=sha256:efd66b00386fdb7dbe4822d172303f40cd05e50e01740b19ea42425cbe653e29 +virtualenv==20.24.6 \ + --hash=sha256:02ece4f56fbf939dbbc33c0715159951d6bf14aaf5457b092e4548e1382455af \ + --hash=sha256:520d056652454c5098a00c0f073611ccbea4c79089331f60bf9d7ba247bb7381 # via nox -webencodings==0.5.1 \ - --hash=sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78 \ - --hash=sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923 - # via bleach -wheel==0.38.4 \ - --hash=sha256:965f5259b566725405b05e7cf774052044b1ed30119b5d586b2703aafe8719ac \ - --hash=sha256:b60533f3f5d530e971d6737ca6d58681ee434818fab630c83a734bb10c083ce8 +wheel==0.41.3 \ + --hash=sha256:488609bc63a29322326e05560731bf7bfea8e48ad646e1f5e40d366607de0942 \ + --hash=sha256:4d4987ce51a49370ea65c0bfd2234e8ce80a12780820d9dc462597a6e60d0841 # via -r requirements.in -zipp==3.10.0 \ - --hash=sha256:4fcb6f278987a6605757302a6e40e896257570d11c51628968ccb2a47e80c6c1 \ - --hash=sha256:7a7262fd930bd3e36c50b9a64897aec3fafff3dfdeec9623ae22b40e93f99bb8 +zipp==3.17.0 \ + --hash=sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31 \ + --hash=sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: -setuptools==65.5.1 \ - --hash=sha256:d0b9a8433464d5800cbe05094acf5c6d52a91bfac9b52bcfc4d41382be5d5d31 \ - --hash=sha256:e197a19aa8ec9722928f2206f8de752def0e4c9fc6953527360d1c36d94ddb2f +setuptools==68.2.2 \ + --hash=sha256:4ac1475276d2f1c48684874089fefcd83bd7162ddaafb81fac866ba0db282a87 \ + --hash=sha256:b454a35605876da60632df1a60f736524eb73cc47bbc9f3f1ef1b644de74fd2a # via -r requirements.in diff --git a/.kokoro/samples/python3.12/common.cfg b/.kokoro/samples/python3.12/common.cfg new file mode 100644 index 0000000000..4571a6d12d --- /dev/null +++ b/.kokoro/samples/python3.12/common.cfg @@ -0,0 +1,40 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.12" +} + +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-312" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-spanner/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. 
+build_file: "python-spanner/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.12/continuous.cfg b/.kokoro/samples/python3.12/continuous.cfg new file mode 100644 index 0000000000..a1c8d9759c --- /dev/null +++ b/.kokoro/samples/python3.12/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.12/periodic-head.cfg b/.kokoro/samples/python3.12/periodic-head.cfg new file mode 100644 index 0000000000..b6133a1180 --- /dev/null +++ b/.kokoro/samples/python3.12/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-spanner/.kokoro/test-samples-against-head.sh" +} diff --git a/.kokoro/samples/python3.12/periodic.cfg b/.kokoro/samples/python3.12/periodic.cfg new file mode 100644 index 0000000000..71cd1e597e --- /dev/null +++ b/.kokoro/samples/python3.12/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} diff --git a/.kokoro/samples/python3.12/presubmit.cfg b/.kokoro/samples/python3.12/presubmit.cfg new file mode 100644 index 0000000000..a1c8d9759c --- /dev/null +++ b/.kokoro/samples/python3.12/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 19409cbd37..6a8e169506 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -22,7 +22,7 @@ repos: - id: end-of-file-fixer - id: check-yaml - repo: https://github.com/psf/black - rev: 22.3.0 + rev: 23.7.0 hooks: - id: black - repo: https://github.com/pycqa/flake8 diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 7ce5921b04..6ee6aabfa1 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "3.40.1" + ".": "3.41.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 9fed5da30c..cd23548f35 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,40 @@ [1]: https://pypi.org/project/google-cloud-spanner/#history +## [3.41.0](https://github.com/googleapis/python-spanner/compare/v3.40.1...v3.41.0) (2024-01-10) + + +### Features + +* Add BatchWrite API ([#1011](https://github.com/googleapis/python-spanner/issues/1011)) ([d0e4ffc](https://github.com/googleapis/python-spanner/commit/d0e4ffccea071feaa2ca012a0e3f60a945ed1a13)) +* Add PG.OID type cod annotation ([#1023](https://github.com/googleapis/python-spanner/issues/1023)) ([2d59dd0](https://github.com/googleapis/python-spanner/commit/2d59dd09b8f14a37c780d8241a76e2f109ba88b0)) +* Add support for Directed Reads ([#1000](https://github.com/googleapis/python-spanner/issues/1000)) ([c4210b2](https://github.com/googleapis/python-spanner/commit/c4210b28466cfd88fffe546140a005a8e0a1af23)) +* Add support for Python 3.12 ([#1040](https://github.com/googleapis/python-spanner/issues/1040)) ([b28dc9b](https://github.com/googleapis/python-spanner/commit/b28dc9b0f97263d3926043fe5dfcb4cdc75ab35a)) +* Batch Write API implementation and samples ([#1027](https://github.com/googleapis/python-spanner/issues/1027)) 
([aa36b07](https://github.com/googleapis/python-spanner/commit/aa36b075ebb13fa952045695a8f4eb6d21ae61ff)) +* Implementation for batch dml in dbapi ([#1055](https://github.com/googleapis/python-spanner/issues/1055)) ([7a92315](https://github.com/googleapis/python-spanner/commit/7a92315c8040dbf6f652974e19cd63abfd6cda2f)) +* Implementation for Begin and Rollback clientside statements ([#1041](https://github.com/googleapis/python-spanner/issues/1041)) ([15623cd](https://github.com/googleapis/python-spanner/commit/15623cda0ac1eb5dd71434c9064134cfa7800a79)) +* Implementation for partitioned query in dbapi ([#1067](https://github.com/googleapis/python-spanner/issues/1067)) ([63daa8a](https://github.com/googleapis/python-spanner/commit/63daa8a682824609b5a21699d95b0f41930635ef)) +* Implementation of client side statements that return ([#1046](https://github.com/googleapis/python-spanner/issues/1046)) ([bb5fa1f](https://github.com/googleapis/python-spanner/commit/bb5fa1fb75dba18965cddeacd77b6af0a05b4697)) +* Implementing client side statements in dbapi (starting with commit) ([#1037](https://github.com/googleapis/python-spanner/issues/1037)) ([eb41b0d](https://github.com/googleapis/python-spanner/commit/eb41b0da7c1e60561b46811d7307e879f071c6ce)) +* Introduce compatibility with native namespace packages ([#1036](https://github.com/googleapis/python-spanner/issues/1036)) ([5d80ab0](https://github.com/googleapis/python-spanner/commit/5d80ab0794216cd093a21989be0883b02eaa437a)) +* Return list of dictionaries for execute streaming sql ([#1003](https://github.com/googleapis/python-spanner/issues/1003)) ([b534a8a](https://github.com/googleapis/python-spanner/commit/b534a8aac116a824544d63a24e38f3d484e0d207)) +* **spanner:** Add autoscaling config to the instance proto ([#1022](https://github.com/googleapis/python-spanner/issues/1022)) ([4d490cf](https://github.com/googleapis/python-spanner/commit/4d490cf9de600b16a90a1420f8773b2ae927983d)) +* **spanner:** Add directed_read_option in spanner.proto ([#1030](https://github.com/googleapis/python-spanner/issues/1030)) ([84d662b](https://github.com/googleapis/python-spanner/commit/84d662b056ca4bd4177b3107ba463302b5362ff9)) + + +### Bug Fixes + +* Executing existing DDL statements on executemany statement execution ([#1032](https://github.com/googleapis/python-spanner/issues/1032)) ([07fbc45](https://github.com/googleapis/python-spanner/commit/07fbc45156a1b42a5e61c9c4b09923f239729aa8)) +* Fix for flaky test_read_timestamp_client_side_autocommit test ([#1071](https://github.com/googleapis/python-spanner/issues/1071)) ([0406ded](https://github.com/googleapis/python-spanner/commit/0406ded8b0abcdc93a7a2422247a14260f5c620c)) +* Require google-cloud-core >= 1.4.4 ([#1015](https://github.com/googleapis/python-spanner/issues/1015)) ([a2f87b9](https://github.com/googleapis/python-spanner/commit/a2f87b9d9591562877696526634f0c7c4dd822dd)) +* Require proto-plus 1.22.2 for python 3.11 ([#880](https://github.com/googleapis/python-spanner/issues/880)) ([7debe71](https://github.com/googleapis/python-spanner/commit/7debe7194b9f56b14daeebb99f48787174a9471b)) +* Use `retry_async` instead of `retry` in async client ([#1044](https://github.com/googleapis/python-spanner/issues/1044)) ([1253ae4](https://github.com/googleapis/python-spanner/commit/1253ae46011daa3a0b939e22e957dd3ab5179210)) + + +### Documentation + +* Minor formatting ([498dba2](https://github.com/googleapis/python-spanner/commit/498dba26a7c1a1cb710a92c0167272ff5c0eef27)) + ## 
[3.40.1](https://github.com/googleapis/python-spanner/compare/v3.40.0...v3.40.1) (2023-08-17) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 0ea84d3216..908e1f0726 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -22,7 +22,7 @@ In order to add a feature: documentation. - The feature must work fully on the following CPython versions: - 3.7, 3.8, 3.9, 3.10 and 3.11 on both UNIX and Windows. + 3.7, 3.8, 3.9, 3.10, 3.11 and 3.12 on both UNIX and Windows. - The feature must not add unnecessary dependencies (where "unnecessary" is of course subjective, but new dependencies should @@ -72,7 +72,7 @@ We use `nox `__ to instrument our tests. - To run a single unit test:: - $ nox -s unit-3.11 -- -k + $ nox -s unit-3.12 -- -k .. note:: @@ -226,12 +226,14 @@ We support: - `Python 3.9`_ - `Python 3.10`_ - `Python 3.11`_ +- `Python 3.12`_ .. _Python 3.7: https://docs.python.org/3.7/ .. _Python 3.8: https://docs.python.org/3.8/ .. _Python 3.9: https://docs.python.org/3.9/ .. _Python 3.10: https://docs.python.org/3.10/ .. _Python 3.11: https://docs.python.org/3.11/ +.. _Python 3.12: https://docs.python.org/3.12/ Supported versions can be found in our ``noxfile.py`` `config`_. diff --git a/docs/index.rst b/docs/index.rst index 0e7f24d6e7..92686cc61c 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -36,13 +36,13 @@ API Documentation spanner_v1/transaction spanner_v1/streamed - spanner_v1/services - spanner_v1/types - spanner_admin_database_v1/services - spanner_admin_database_v1/types + spanner_v1/services_ + spanner_v1/types_ + spanner_admin_database_v1/services_ + spanner_admin_database_v1/types_ spanner_admin_database_v1/database_admin - spanner_admin_instance_v1/services - spanner_admin_instance_v1/types + spanner_admin_instance_v1/services_ + spanner_admin_instance_v1/types_ spanner_admin_instance_v1/instance_admin diff --git a/docs/spanner_admin_database_v1/services.rst b/docs/spanner_admin_database_v1/services_.rst similarity index 100% rename from docs/spanner_admin_database_v1/services.rst rename to docs/spanner_admin_database_v1/services_.rst diff --git a/docs/spanner_admin_database_v1/types.rst b/docs/spanner_admin_database_v1/types_.rst similarity index 100% rename from docs/spanner_admin_database_v1/types.rst rename to docs/spanner_admin_database_v1/types_.rst diff --git a/docs/spanner_admin_instance_v1/services.rst b/docs/spanner_admin_instance_v1/services_.rst similarity index 100% rename from docs/spanner_admin_instance_v1/services.rst rename to docs/spanner_admin_instance_v1/services_.rst diff --git a/docs/spanner_admin_instance_v1/types.rst b/docs/spanner_admin_instance_v1/types_.rst similarity index 100% rename from docs/spanner_admin_instance_v1/types.rst rename to docs/spanner_admin_instance_v1/types_.rst diff --git a/docs/spanner_v1/services.rst b/docs/spanner_v1/services_.rst similarity index 100% rename from docs/spanner_v1/services.rst rename to docs/spanner_v1/services_.rst diff --git a/docs/spanner_v1/types.rst b/docs/spanner_v1/types_.rst similarity index 100% rename from docs/spanner_v1/types.rst rename to docs/spanner_v1/types_.rst diff --git a/google/__init__.py b/google/__init__.py deleted file mode 100644 index 2f4b4738ae..0000000000 --- a/google/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - import pkgutil - - __path__ = pkgutil.extend_path(__path__, __name__) diff --git a/google/cloud/__init__.py b/google/cloud/__init__.py deleted file mode 100644 index 
2f4b4738ae..0000000000 --- a/google/cloud/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - import pkgutil - - __path__ = pkgutil.extend_path(__path__, __name__) diff --git a/google/cloud/spanner_admin_database_v1/gapic_version.py b/google/cloud/spanner_admin_database_v1/gapic_version.py index 4f879f0e40..36303c7f1a 100644 --- a/google/cloud/spanner_admin_database_v1/gapic_version.py +++ b/google/cloud/spanner_admin_database_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "3.40.1" # {x-release-please-version} +__version__ = "3.41.0" # {x-release-please-version} diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py index 4cd1d4756a..c0f9389db8 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py @@ -33,14 +33,14 @@ from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 -from google.api_core import retry as retries +from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -51,7 +51,7 @@ from google.cloud.spanner_admin_database_v1.types import spanner_database_admin from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore @@ -299,7 +299,7 @@ async def sample_list_databases(): This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -335,7 +335,7 @@ async def sample_list_databases(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_databases, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=32.0, multiplier=1.3, @@ -453,7 +453,7 @@ async def sample_create_database(): This corresponds to the ``create_statement`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
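
Editor's note: the async admin clients above now type their per-call ``retry`` argument as ``google.api_core.retry_async.AsyncRetry`` (the ``OptionalRetry`` alias resolves to it), and the generated defaults are built with ``retries.AsyncRetry``. A minimal sketch of passing an explicit ``AsyncRetry`` to one of these calls; the project and instance path are placeholders, and the retry values mirror the generated defaults rather than being required:

```python
import asyncio

from google.api_core import exceptions as core_exceptions
from google.api_core import retry_async
from google.cloud.spanner_admin_database_v1 import DatabaseAdminAsyncClient


async def list_databases_with_retry():
    client = DatabaseAdminAsyncClient()
    # AsyncRetry is the async-aware counterpart of retry.Retry; values are illustrative.
    retry = retry_async.AsyncRetry(
        initial=1.0,
        maximum=32.0,
        multiplier=1.3,
        predicate=retry_async.if_exception_type(
            core_exceptions.DeadlineExceeded,
            core_exceptions.ServiceUnavailable,
        ),
    )
    page_result = await client.list_databases(
        parent="projects/my-project/instances/my-instance",  # placeholder path
        retry=retry,
        timeout=3600.0,
    )
    async for database in page_result:
        print(database.name)


asyncio.run(list_databases_with_retry())
```

The synchronous clients are unchanged by this hunk and continue to accept ``google.api_core.retry.Retry``.
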
- retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -571,7 +571,7 @@ async def sample_get_database(): This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -602,7 +602,7 @@ async def sample_get_database(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_database, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=32.0, multiplier=1.3, @@ -736,7 +736,7 @@ async def sample_update_database(): This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -774,7 +774,7 @@ async def sample_update_database(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_database, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=32.0, multiplier=1.3, @@ -901,7 +901,7 @@ async def sample_update_database_ddl(): This corresponds to the ``statements`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -946,7 +946,7 @@ async def sample_update_database_ddl(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_database_ddl, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=32.0, multiplier=1.3, @@ -1033,7 +1033,7 @@ async def sample_drop_database(): This corresponds to the ``database`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1060,7 +1060,7 @@ async def sample_drop_database(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.drop_database, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=32.0, multiplier=1.3, @@ -1142,7 +1142,7 @@ async def sample_get_database_ddl(): This corresponds to the ``database`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
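
Editor's note: the ``update_database_ddl`` docstring touched above belongs to a long-running-operation method. A hedged sketch of driving that call end to end on the async client; the database name and DDL statement are placeholders:

```python
from google.cloud.spanner_admin_database_v1 import DatabaseAdminAsyncClient


async def add_birthdate_column():
    client = DatabaseAdminAsyncClient()
    operation = await client.update_database_ddl(
        database="projects/my-project/instances/my-instance/databases/my-database",
        statements=["ALTER TABLE Singers ADD COLUMN BirthDate DATE"],
    )
    # The first await issued the RPC; this await blocks until the schema change completes.
    await operation.result()
```
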
- retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1175,7 +1175,7 @@ async def sample_get_database_ddl(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_database_ddl, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=32.0, multiplier=1.3, @@ -1257,14 +1257,14 @@ async def sample_set_iam_policy(): The request object. Request message for ``SetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy is being specified. - See the operation documentation for the + policy is being specified. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1400,14 +1400,14 @@ async def sample_get_iam_policy(): The request object. Request message for ``GetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy is being requested. - See the operation documentation for the + policy is being requested. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1470,7 +1470,7 @@ async def sample_get_iam_policy(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_iam_policy, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=32.0, multiplier=1.3, @@ -1555,8 +1555,8 @@ async def sample_test_iam_permissions(): The request object. Request message for ``TestIamPermissions`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy detail is being requested. - See the operation documentation for the + policy detail is being requested. See + the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -1571,7 +1571,7 @@ async def sample_test_iam_permissions(): This corresponds to the ``permissions`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1712,7 +1712,7 @@ async def sample_create_backup(): This corresponds to the ``backup_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1883,7 +1883,7 @@ async def sample_copy_backup(): This corresponds to the ``expire_time`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2003,7 +2003,7 @@ async def sample_get_backup(): This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2034,7 +2034,7 @@ async def sample_get_backup(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_backup, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=32.0, multiplier=1.3, @@ -2130,7 +2130,7 @@ async def sample_update_backup(): This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2163,7 +2163,7 @@ async def sample_update_backup(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_backup, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=32.0, multiplier=1.3, @@ -2243,7 +2243,7 @@ async def sample_delete_backup(): This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2270,7 +2270,7 @@ async def sample_delete_backup(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_backup, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=32.0, multiplier=1.3, @@ -2349,7 +2349,7 @@ async def sample_list_backups(): This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2385,7 +2385,7 @@ async def sample_list_backups(): # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_backups, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=32.0, multiplier=1.3, @@ -2522,7 +2522,7 @@ async def sample_restore_database(): This corresponds to the ``backup`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2652,7 +2652,7 @@ async def sample_list_database_operations(): This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2688,7 +2688,7 @@ async def sample_list_database_operations(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_database_operations, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=32.0, multiplier=1.3, @@ -2789,7 +2789,7 @@ async def sample_list_backup_operations(): This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2825,7 +2825,7 @@ async def sample_list_backup_operations(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_backup_operations, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=32.0, multiplier=1.3, @@ -2917,7 +2917,7 @@ async def sample_list_database_roles(): This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2953,7 +2953,7 @@ async def sample_list_database_roles(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_database_roles, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=32.0, multiplier=1.3, @@ -3007,7 +3007,7 @@ async def list_operations( request (:class:`~.operations_pb2.ListOperationsRequest`): The request object. Request message for `ListOperations` method. - retry (google.api_core.retry.Retry): Designation of what errors, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -3024,7 +3024,7 @@ async def list_operations( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( + rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_operations, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, @@ -3061,7 +3061,7 @@ async def get_operation( request (:class:`~.operations_pb2.GetOperationRequest`): The request object. Request message for `GetOperation` method. - retry (google.api_core.retry.Retry): Designation of what errors, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -3078,7 +3078,7 @@ async def get_operation( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( + rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_operation, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, @@ -3120,7 +3120,7 @@ async def delete_operation( request (:class:`~.operations_pb2.DeleteOperationRequest`): The request object. Request message for `DeleteOperation` method. - retry (google.api_core.retry.Retry): Designation of what errors, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -3136,7 +3136,7 @@ async def delete_operation( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( + rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_operation, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, @@ -3174,7 +3174,7 @@ async def cancel_operation( request (:class:`~.operations_pb2.CancelOperationRequest`): The request object. Request message for `CancelOperation` method. - retry (google.api_core.retry.Retry): Designation of what errors, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -3190,7 +3190,7 @@ async def cancel_operation( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
- rpc = gapic_v1.method.wrap_method( + rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_operation, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py index b6f2d1f1e7..39904ec05f 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py @@ -55,7 +55,7 @@ from google.cloud.spanner_admin_database_v1.types import spanner_database_admin from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore @@ -1523,8 +1523,8 @@ def sample_set_iam_policy(): The request object. Request message for ``SetIamPolicy`` method. resource (str): REQUIRED: The resource for which the - policy is being specified. - See the operation documentation for the + policy is being specified. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -1663,8 +1663,8 @@ def sample_get_iam_policy(): The request object. Request message for ``GetIamPolicy`` method. resource (str): REQUIRED: The resource for which the - policy is being requested. - See the operation documentation for the + policy is being requested. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -1805,8 +1805,8 @@ def sample_test_iam_permissions(): The request object. Request message for ``TestIamPermissions`` method. resource (str): REQUIRED: The resource for which the - policy detail is being requested. - See the operation documentation for the + policy detail is being requested. See + the operation documentation for the appropriate value for this field. 
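
Editor's note: earlier in this hunk the long-running-operations mixin methods on the async client (``list_operations``, ``get_operation``, ``delete_operation``, ``cancel_operation``) are rewrapped with ``gapic_v1.method_async.wrap_method``, so they behave like every other coroutine on the client. A minimal sketch, with the operation name as a placeholder:

```python
from google.longrunning import operations_pb2


async def cancel_if_running(client, operation_name):
    # operation_name is a placeholder, e.g. the name of a pending UpdateDatabaseDdl operation.
    operation = await client.get_operation(
        operations_pb2.GetOperationRequest(name=operation_name)
    )
    if not operation.done:
        await client.cancel_operation(
            operations_pb2.CancelOperationRequest(name=operation_name)
        )
```
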
This corresponds to the ``resource`` field diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py index 5f800d5063..2d2b2b5ad9 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py @@ -32,7 +32,6 @@ from google.cloud.spanner_admin_database_v1.types import spanner_database_admin from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py index a42258e96c..d518b455fa 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py @@ -30,7 +30,6 @@ from google.cloud.spanner_admin_database_v1.types import spanner_database_admin from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore from .base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py index badd1058a1..ddf3d0eb53 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py @@ -30,7 +30,6 @@ from google.cloud.spanner_admin_database_v1.types import spanner_database_admin from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore from .base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py index bd35307fcc..07fe33ae45 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py @@ -28,7 +28,6 @@ from google.protobuf import json_format from google.api_core import operations_v1 -from google.longrunning import operations_pb2 from requests import __version__ as requests_version import dataclasses import re @@ -46,8 +45,8 @@ from google.cloud.spanner_admin_database_v1.types import spanner_database_admin from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore from .base import ( DatabaseAdminTransport, @@ -3184,7 +3183,6 @@ def 
__call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -3259,7 +3257,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -3334,7 +3331,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -3413,7 +3409,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: diff --git a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py index 8ba67a4480..92f6f58613 100644 --- a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py +++ b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py @@ -131,6 +131,7 @@ class Database(proto.Message): the encryption information for the database, such as encryption state and the Cloud KMS key versions that are in use. + For databases that are using Google default or other types of encryption, this field is empty. diff --git a/google/cloud/spanner_admin_instance_v1/__init__.py b/google/cloud/spanner_admin_instance_v1/__init__.py index bf1893144c..e92a5768ad 100644 --- a/google/cloud/spanner_admin_instance_v1/__init__.py +++ b/google/cloud/spanner_admin_instance_v1/__init__.py @@ -22,6 +22,7 @@ from .services.instance_admin import InstanceAdminAsyncClient from .types.common import OperationProgress +from .types.spanner_instance_admin import AutoscalingConfig from .types.spanner_instance_admin import CreateInstanceConfigMetadata from .types.spanner_instance_admin import CreateInstanceConfigRequest from .types.spanner_instance_admin import CreateInstanceMetadata @@ -46,6 +47,7 @@ __all__ = ( "InstanceAdminAsyncClient", + "AutoscalingConfig", "CreateInstanceConfigMetadata", "CreateInstanceConfigRequest", "CreateInstanceMetadata", diff --git a/google/cloud/spanner_admin_instance_v1/gapic_version.py b/google/cloud/spanner_admin_instance_v1/gapic_version.py index 4f879f0e40..36303c7f1a 100644 --- a/google/cloud/spanner_admin_instance_v1/gapic_version.py +++ b/google/cloud/spanner_admin_instance_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "3.40.1" # {x-release-please-version} +__version__ = "3.41.0" # {x-release-please-version} diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py index f6dbc4e73d..a6ad4ca887 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py @@ -33,14 +33,14 @@ from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 -from google.api_core import retry as retries +from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -58,10 +58,12 @@ class InstanceAdminAsyncClient: """Cloud Spanner Instance Admin API + The Cloud Spanner Instance Admin API can be used to create, delete, modify and list instances. Instances are dedicated Cloud Spanner serving and storage resources to be used by Cloud Spanner databases. + Each instance has a "configuration", which dictates where the serving resources for the Cloud Spanner instance are located (e.g., US-central, Europe). Configurations are created by Google @@ -298,7 +300,7 @@ async def sample_list_instance_configs(): This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -334,7 +336,7 @@ async def sample_list_instance_configs(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_instance_configs, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=32.0, multiplier=1.3, @@ -426,7 +428,7 @@ async def sample_get_instance_config(): This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -461,7 +463,7 @@ async def sample_get_instance_config(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_instance_config, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=32.0, multiplier=1.3, @@ -614,7 +616,7 @@ async def sample_create_instance_config(): This corresponds to the ``instance_config_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -803,7 +805,7 @@ async def sample_update_instance_config(): This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -928,7 +930,7 @@ async def sample_delete_instance_config(): This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1035,7 +1037,7 @@ async def sample_list_instance_config_operations(): This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1153,7 +1155,7 @@ async def sample_list_instances(): This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1189,7 +1191,7 @@ async def sample_list_instances(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_instances, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=32.0, multiplier=1.3, @@ -1279,7 +1281,7 @@ async def sample_get_instance(): This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1313,7 +1315,7 @@ async def sample_get_instance(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_instance, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=32.0, multiplier=1.3, @@ -1460,7 +1462,7 @@ async def sample_create_instance(): This corresponds to the ``instance`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1649,7 +1651,7 @@ async def sample_update_instance(): This corresponds to the ``field_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1777,7 +1779,7 @@ async def sample_delete_instance(): This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1804,7 +1806,7 @@ async def sample_delete_instance(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_instance, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=32.0, multiplier=1.3, @@ -1879,14 +1881,14 @@ async def sample_set_iam_policy(): The request object. Request message for ``SetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy is being specified. - See the operation documentation for the + policy is being specified. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2018,14 +2020,14 @@ async def sample_get_iam_policy(): The request object. Request message for ``GetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy is being requested. - See the operation documentation for the + policy is being requested. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2088,7 +2090,7 @@ async def sample_get_iam_policy(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_iam_policy, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=32.0, multiplier=1.3, @@ -2170,8 +2172,8 @@ async def sample_test_iam_permissions(): The request object. Request message for ``TestIamPermissions`` method. 
resource (:class:`str`): REQUIRED: The resource for which the - policy detail is being requested. - See the operation documentation for the + policy detail is being requested. See + the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2186,7 +2188,7 @@ async def sample_test_iam_permissions(): This corresponds to the ``permissions`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py index dd94cacafb..cab796f644 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py @@ -98,10 +98,12 @@ def get_transport_class( class InstanceAdminClient(metaclass=InstanceAdminClientMeta): """Cloud Spanner Instance Admin API + The Cloud Spanner Instance Admin API can be used to create, delete, modify and list instances. Instances are dedicated Cloud Spanner serving and storage resources to be used by Cloud Spanner databases. + Each instance has a "configuration", which dictates where the serving resources for the Cloud Spanner instance are located (e.g., US-central, Europe). Configurations are created by Google @@ -2073,8 +2075,8 @@ def sample_set_iam_policy(): The request object. Request message for ``SetIamPolicy`` method. resource (str): REQUIRED: The resource for which the - policy is being specified. - See the operation documentation for the + policy is being specified. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2209,8 +2211,8 @@ def sample_get_iam_policy(): The request object. Request message for ``GetIamPolicy`` method. resource (str): REQUIRED: The resource for which the - policy is being requested. - See the operation documentation for the + policy is being requested. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2348,8 +2350,8 @@ def sample_test_iam_permissions(): The request object. Request message for ``TestIamPermissions`` method. resource (str): REQUIRED: The resource for which the - policy detail is being requested. - See the operation documentation for the + policy detail is being requested. See + the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py index 4e5be0b229..03fef980e6 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py @@ -37,10 +37,12 @@ class InstanceAdminGrpcTransport(InstanceAdminTransport): """gRPC backend transport for InstanceAdmin. Cloud Spanner Instance Admin API + The Cloud Spanner Instance Admin API can be used to create, delete, modify and list instances. 
Instances are dedicated Cloud Spanner serving and storage resources to be used by Cloud Spanner databases. + Each instance has a "configuration", which dictates where the serving resources for the Cloud Spanner instance are located (e.g., US-central, Europe). Configurations are created by Google diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py index b04bc2543b..a5ff6d1635 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py @@ -38,10 +38,12 @@ class InstanceAdminGrpcAsyncIOTransport(InstanceAdminTransport): """gRPC AsyncIO backend transport for InstanceAdmin. Cloud Spanner Instance Admin API + The Cloud Spanner Instance Admin API can be used to create, delete, modify and list instances. Instances are dedicated Cloud Spanner serving and storage resources to be used by Cloud Spanner databases. + Each instance has a "configuration", which dictates where the serving resources for the Cloud Spanner instance are located (e.g., US-central, Europe). Configurations are created by Google diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/rest.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/rest.py index c743fa011d..2ba6d65087 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/rest.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/rest.py @@ -43,8 +43,8 @@ from google.cloud.spanner_admin_instance_v1.types import spanner_instance_admin from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore from .base import ( InstanceAdminTransport, @@ -505,10 +505,12 @@ class InstanceAdminRestTransport(InstanceAdminTransport): """REST backend transport for InstanceAdmin. Cloud Spanner Instance Admin API + The Cloud Spanner Instance Admin API can be used to create, delete, modify and list instances. Instances are dedicated Cloud Spanner serving and storage resources to be used by Cloud Spanner databases. + Each instance has a "configuration", which dictates where the serving resources for the Cloud Spanner instance are located (e.g., US-central, Europe). 
Configurations are created by Google diff --git a/google/cloud/spanner_admin_instance_v1/types/__init__.py b/google/cloud/spanner_admin_instance_v1/types/__init__.py index 3ee4fcb10a..b4eaac8066 100644 --- a/google/cloud/spanner_admin_instance_v1/types/__init__.py +++ b/google/cloud/spanner_admin_instance_v1/types/__init__.py @@ -17,6 +17,7 @@ OperationProgress, ) from .spanner_instance_admin import ( + AutoscalingConfig, CreateInstanceConfigMetadata, CreateInstanceConfigRequest, CreateInstanceMetadata, @@ -42,6 +43,7 @@ __all__ = ( "OperationProgress", + "AutoscalingConfig", "CreateInstanceConfigMetadata", "CreateInstanceConfigRequest", "CreateInstanceMetadata", diff --git a/google/cloud/spanner_admin_instance_v1/types/spanner_instance_admin.py b/google/cloud/spanner_admin_instance_v1/types/spanner_instance_admin.py index 394e799d05..b4c18b85f2 100644 --- a/google/cloud/spanner_admin_instance_v1/types/spanner_instance_admin.py +++ b/google/cloud/spanner_admin_instance_v1/types/spanner_instance_admin.py @@ -30,6 +30,7 @@ manifest={ "ReplicaInfo", "InstanceConfig", + "AutoscalingConfig", "Instance", "ListInstanceConfigsRequest", "ListInstanceConfigsResponse", @@ -297,6 +298,116 @@ class State(proto.Enum): ) +class AutoscalingConfig(proto.Message): + r"""Autoscaling config for an instance. + + Attributes: + autoscaling_limits (google.cloud.spanner_admin_instance_v1.types.AutoscalingConfig.AutoscalingLimits): + Required. Autoscaling limits for an instance. + autoscaling_targets (google.cloud.spanner_admin_instance_v1.types.AutoscalingConfig.AutoscalingTargets): + Required. The autoscaling targets for an + instance. + """ + + class AutoscalingLimits(proto.Message): + r"""The autoscaling limits for the instance. Users can define the + minimum and maximum compute capacity allocated to the instance, and + the autoscaler will only scale within that range. Users can either + use nodes or processing units to specify the limits, but should use + the same unit to set both the min_limit and max_limit. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + min_nodes (int): + Minimum number of nodes allocated to the + instance. If set, this number should be greater + than or equal to 1. + + This field is a member of `oneof`_ ``min_limit``. + min_processing_units (int): + Minimum number of processing units allocated + to the instance. If set, this number should be + multiples of 1000. + + This field is a member of `oneof`_ ``min_limit``. + max_nodes (int): + Maximum number of nodes allocated to the instance. If set, + this number should be greater than or equal to min_nodes. + + This field is a member of `oneof`_ ``max_limit``. + max_processing_units (int): + Maximum number of processing units allocated to the + instance. If set, this number should be multiples of 1000 + and be greater than or equal to min_processing_units. + + This field is a member of `oneof`_ ``max_limit``. 
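
Editor's note: the new ``AutoscalingConfig`` message above (exported from ``google.cloud.spanner_admin_instance_v1``) pairs required autoscaling limits with required targets. A hedged sketch of assembling one; the numbers are placeholders, and the limits must be expressed in a single unit (nodes here, or processing units instead):

```python
from google.cloud import spanner_admin_instance_v1

autoscaling_config = spanner_admin_instance_v1.AutoscalingConfig(
    autoscaling_limits=spanner_admin_instance_v1.AutoscalingConfig.AutoscalingLimits(
        min_nodes=1,   # must be >= 1 when using nodes
        max_nodes=10,  # must be >= min_nodes
    ),
    autoscaling_targets=spanner_admin_instance_v1.AutoscalingConfig.AutoscalingTargets(
        high_priority_cpu_utilization_percent=65,  # valid range [10, 90]
        storage_utilization_percent=95,            # valid range [10, 100]
    ),
)
```

Per the ``Instance`` changes later in this diff, the config is attached through the instance's ``autoscaling_config`` field, at which point ``node_count`` and ``processing_units`` become output-only and reflect the capacity the autoscaler has allocated.
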
+ """ + + min_nodes: int = proto.Field( + proto.INT32, + number=1, + oneof="min_limit", + ) + min_processing_units: int = proto.Field( + proto.INT32, + number=2, + oneof="min_limit", + ) + max_nodes: int = proto.Field( + proto.INT32, + number=3, + oneof="max_limit", + ) + max_processing_units: int = proto.Field( + proto.INT32, + number=4, + oneof="max_limit", + ) + + class AutoscalingTargets(proto.Message): + r"""The autoscaling targets for an instance. + + Attributes: + high_priority_cpu_utilization_percent (int): + Required. The target high priority cpu utilization + percentage that the autoscaler should be trying to achieve + for the instance. This number is on a scale from 0 (no + utilization) to 100 (full utilization). The valid range is + [10, 90] inclusive. + storage_utilization_percent (int): + Required. The target storage utilization percentage that the + autoscaler should be trying to achieve for the instance. + This number is on a scale from 0 (no utilization) to 100 + (full utilization). The valid range is [10, 100] inclusive. + """ + + high_priority_cpu_utilization_percent: int = proto.Field( + proto.INT32, + number=1, + ) + storage_utilization_percent: int = proto.Field( + proto.INT32, + number=2, + ) + + autoscaling_limits: AutoscalingLimits = proto.Field( + proto.MESSAGE, + number=1, + message=AutoscalingLimits, + ) + autoscaling_targets: AutoscalingTargets = proto.Field( + proto.MESSAGE, + number=2, + message=AutoscalingTargets, + ) + + class Instance(proto.Message): r"""An isolated set of Cloud Spanner resources on which databases can be hosted. @@ -325,8 +436,13 @@ class Instance(proto.Message): node_count (int): The number of nodes allocated to this instance. At most one of either node_count or processing_units should be present - in the message. This may be zero in API responses for - instances that are not yet in state ``READY``. + in the message. + + Users can set the node_count field to specify the target + number of nodes allocated to the instance. + + This may be zero in API responses for instances that are not + yet in state ``READY``. See `the documentation `__ @@ -334,12 +450,23 @@ class Instance(proto.Message): processing_units (int): The number of processing units allocated to this instance. At most one of processing_units or node_count should be - present in the message. This may be zero in API responses - for instances that are not yet in state ``READY``. + present in the message. + + Users can set the processing_units field to specify the + target number of processing units allocated to the instance. + + This may be zero in API responses for instances that are not + yet in state ``READY``. See `the documentation `__ for more information about nodes and processing units. + autoscaling_config (google.cloud.spanner_admin_instance_v1.types.AutoscalingConfig): + Optional. The autoscaling configuration. Autoscaling is + enabled if this field is set. When autoscaling is enabled, + node_count and processing_units are treated as OUTPUT_ONLY + fields and reflect the current compute capacity allocated to + the instance. state (google.cloud.spanner_admin_instance_v1.types.Instance.State): Output only. The current instance state. 
For [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance], @@ -424,6 +551,11 @@ class State(proto.Enum): proto.INT32, number=9, ) + autoscaling_config: "AutoscalingConfig" = proto.Field( + proto.MESSAGE, + number=17, + message="AutoscalingConfig", + ) state: State = proto.Field( proto.ENUM, number=6, diff --git a/google/cloud/spanner_dbapi/batch_dml_executor.py b/google/cloud/spanner_dbapi/batch_dml_executor.py new file mode 100644 index 0000000000..f91cf37b59 --- /dev/null +++ b/google/cloud/spanner_dbapi/batch_dml_executor.py @@ -0,0 +1,131 @@ +# Copyright 2023 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from enum import Enum +from typing import TYPE_CHECKING, List +from google.cloud.spanner_dbapi.checksum import ResultsChecksum +from google.cloud.spanner_dbapi.parsed_statement import ( + ParsedStatement, + StatementType, + Statement, +) +from google.rpc.code_pb2 import ABORTED, OK +from google.api_core.exceptions import Aborted + +from google.cloud.spanner_dbapi.utils import StreamedManyResultSets + +if TYPE_CHECKING: + from google.cloud.spanner_dbapi.cursor import Cursor + + +class BatchDmlExecutor: + """Executor that is used when a DML batch is started. These batches only + accept DML statements. All DML statements are buffered locally and sent to + Spanner when runBatch() is called. + + :type "Cursor": :class:`~google.cloud.spanner_dbapi.cursor.Cursor` + :param cursor: + """ + + def __init__(self, cursor: "Cursor"): + self._cursor = cursor + self._connection = cursor.connection + self._statements: List[Statement] = [] + + def execute_statement(self, parsed_statement: ParsedStatement): + """Executes the statement when dml batch is active by buffering the + statement in-memory. + + :type parsed_statement: ParsedStatement + :param parsed_statement: parsed statement containing sql query and query + params + """ + from google.cloud.spanner_dbapi import ProgrammingError + + if ( + parsed_statement.statement_type != StatementType.UPDATE + and parsed_statement.statement_type != StatementType.INSERT + ): + raise ProgrammingError("Only DML statements are allowed in batch DML mode.") + self._statements.append(parsed_statement.statement) + + def run_batch_dml(self): + """Executes all the buffered statements on the active dml batch by + making a call to Spanner. + """ + return run_batch_dml(self._cursor, self._statements) + + +def run_batch_dml(cursor: "Cursor", statements: List[Statement]): + """Executes all the dml statements by making a batch call to Spanner. 
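
Editor's note: ``BatchDmlExecutor`` above buffers DML statements until the batch is run, which pairs with the new client-side ``START BATCH DML`` / ``RUN BATCH`` statements from the changelog. A hedged sketch of the flow through the DB-API layer; the instance, database, and table are placeholders:

```python
from google.cloud.spanner_dbapi import connect

connection = connect("my-instance", "my-database")
cursor = connection.cursor()

cursor.execute("START BATCH DML")  # switches the cursor into DML batch mode
cursor.execute("INSERT INTO Singers (SingerId, FirstName) VALUES (1, 'Marc')")
cursor.execute("INSERT INTO Singers (SingerId, FirstName) VALUES (2, 'Elena')")
cursor.execute("RUN BATCH")  # both buffered statements go to Spanner in one batch_update call

connection.commit()
cursor.close()
connection.close()
```
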
+ + :type cursor: Cursor + :param cursor: Database Cursor object + + :type statements: List[Statement] + :param statements: list of statements to execute in batch + """ + from google.cloud.spanner_dbapi import OperationalError + + connection = cursor.connection + many_result_set = StreamedManyResultSets() + statements_tuple = [] + for statement in statements: + statements_tuple.append(statement.get_tuple()) + if not connection._client_transaction_started: + res = connection.database.run_in_transaction(_do_batch_update, statements_tuple) + many_result_set.add_iter(res) + cursor._row_count = sum([max(val, 0) for val in res]) + else: + retried = False + while True: + try: + transaction = connection.transaction_checkout() + status, res = transaction.batch_update(statements_tuple) + many_result_set.add_iter(res) + res_checksum = ResultsChecksum() + res_checksum.consume_result(res) + res_checksum.consume_result(status.code) + if not retried: + connection._statements.append((statements, res_checksum)) + cursor._row_count = sum([max(val, 0) for val in res]) + + if status.code == ABORTED: + connection._transaction = None + raise Aborted(status.message) + elif status.code != OK: + raise OperationalError(status.message) + return many_result_set + except Aborted: + connection.retry_transaction() + retried = True + + +def _do_batch_update(transaction, statements): + from google.cloud.spanner_dbapi import OperationalError + + status, res = transaction.batch_update(statements) + if status.code == ABORTED: + raise Aborted(status.message) + elif status.code != OK: + raise OperationalError(status.message) + return res + + +class BatchMode(Enum): + DML = 1 + DDL = 2 + NONE = 3 diff --git a/google/cloud/spanner_dbapi/client_side_statement_executor.py b/google/cloud/spanner_dbapi/client_side_statement_executor.py new file mode 100644 index 0000000000..4d3408218c --- /dev/null +++ b/google/cloud/spanner_dbapi/client_side_statement_executor.py @@ -0,0 +1,119 @@ +# Copyright 2023 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from google.cloud.spanner_dbapi.cursor import Cursor + from google.cloud.spanner_dbapi import ProgrammingError + +from google.cloud.spanner_dbapi.parsed_statement import ( + ParsedStatement, + ClientSideStatementType, +) +from google.cloud.spanner_v1 import ( + Type, + StructType, + TypeCode, + ResultSetMetadata, + PartialResultSet, +) + +from google.cloud.spanner_v1._helpers import _make_value_pb +from google.cloud.spanner_v1.streamed import StreamedResultSet + +CONNECTION_CLOSED_ERROR = "This connection is closed" +TRANSACTION_NOT_STARTED_WARNING = ( + "This method is non-operational as a transaction has not been started." +) + + +def execute(cursor: "Cursor", parsed_statement: ParsedStatement): + """Executes the client side statements by calling the relevant method. + + It is an internal method that can make backwards-incompatible changes. 
+ + :type cursor: Cursor + :param cursor: Cursor object of the dbApi + + :type parsed_statement: ParsedStatement + :param parsed_statement: parsed_statement based on the sql query + """ + connection = cursor.connection + column_values = [] + if connection.is_closed: + raise ProgrammingError(CONNECTION_CLOSED_ERROR) + statement_type = parsed_statement.client_side_statement_type + if statement_type == ClientSideStatementType.COMMIT: + connection.commit() + return None + if statement_type == ClientSideStatementType.BEGIN: + connection.begin() + return None + if statement_type == ClientSideStatementType.ROLLBACK: + connection.rollback() + return None + if statement_type == ClientSideStatementType.SHOW_COMMIT_TIMESTAMP: + if ( + connection._transaction is not None + and connection._transaction.committed is not None + ): + column_values.append(connection._transaction.committed) + return _get_streamed_result_set( + ClientSideStatementType.SHOW_COMMIT_TIMESTAMP.name, + TypeCode.TIMESTAMP, + column_values, + ) + if statement_type == ClientSideStatementType.SHOW_READ_TIMESTAMP: + if ( + connection._snapshot is not None + and connection._snapshot._transaction_read_timestamp is not None + ): + column_values.append(connection._snapshot._transaction_read_timestamp) + return _get_streamed_result_set( + ClientSideStatementType.SHOW_READ_TIMESTAMP.name, + TypeCode.TIMESTAMP, + column_values, + ) + if statement_type == ClientSideStatementType.START_BATCH_DML: + connection.start_batch_dml(cursor) + return None + if statement_type == ClientSideStatementType.RUN_BATCH: + return connection.run_batch() + if statement_type == ClientSideStatementType.ABORT_BATCH: + return connection.abort_batch() + if statement_type == ClientSideStatementType.PARTITION_QUERY: + partition_ids = connection.partition_query(parsed_statement) + return _get_streamed_result_set( + "PARTITION", + TypeCode.STRING, + partition_ids, + ) + if statement_type == ClientSideStatementType.RUN_PARTITION: + return connection.run_partition( + parsed_statement.client_side_statement_params[0] + ) + + +def _get_streamed_result_set(column_name, type_code, column_values): + struct_type_pb = StructType( + fields=[StructType.Field(name=column_name, type_=Type(code=type_code))] + ) + + result_set = PartialResultSet(metadata=ResultSetMetadata(row_type=struct_type_pb)) + if len(column_values) > 0: + column_values_pb = [] + for column_value in column_values: + column_values_pb.append(_make_value_pb(column_value)) + result_set.values.extend(column_values_pb) + return StreamedResultSet(iter([result_set])) diff --git a/google/cloud/spanner_dbapi/client_side_statement_parser.py b/google/cloud/spanner_dbapi/client_side_statement_parser.py new file mode 100644 index 0000000000..04a3cc523c --- /dev/null +++ b/google/cloud/spanner_dbapi/client_side_statement_parser.py @@ -0,0 +1,85 @@ +# Copyright 2023 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
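For orientation, here is a minimal usage sketch of the client-side statement grammar handled by the executor above and by the parser whose regexes follow next in this file. It is not part of the change itself: the instance and database ids and the Singers table are placeholders, and the flow simply strings the new statements together through the DB-API cursor.

from google.cloud.spanner_dbapi import connect

# Placeholder ids; any Spanner database with a Singers table would do.
connection = connect("test-instance", "example-db")
cursor = connection.cursor()

cursor.execute("BEGIN TRANSACTION")   # parsed as ClientSideStatementType.BEGIN
cursor.execute("START BATCH DML")     # following DML is buffered by BatchDmlExecutor
cursor.execute("INSERT INTO Singers (SingerId, FirstName) VALUES (1, 'Marc')")
cursor.execute("INSERT INTO Singers (SingerId, FirstName) VALUES (2, 'Cat')")
cursor.execute("RUN BATCH")           # both statements are sent to Spanner in one batch call
cursor.execute("COMMIT")              # equivalent to connection.commit()

cursor.execute("SHOW VARIABLE COMMIT_TIMESTAMP")
print(cursor.fetchone())              # timestamp of the commit above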
+ +import re + +from google.cloud.spanner_dbapi.parsed_statement import ( + ParsedStatement, + StatementType, + ClientSideStatementType, + Statement, +) + +RE_BEGIN = re.compile(r"^\s*(BEGIN|START)(TRANSACTION)?", re.IGNORECASE) +RE_COMMIT = re.compile(r"^\s*(COMMIT)(TRANSACTION)?", re.IGNORECASE) +RE_ROLLBACK = re.compile(r"^\s*(ROLLBACK)(TRANSACTION)?", re.IGNORECASE) +RE_SHOW_COMMIT_TIMESTAMP = re.compile( + r"^\s*(SHOW)\s+(VARIABLE)\s+(COMMIT_TIMESTAMP)", re.IGNORECASE +) +RE_SHOW_READ_TIMESTAMP = re.compile( + r"^\s*(SHOW)\s+(VARIABLE)\s+(READ_TIMESTAMP)", re.IGNORECASE +) +RE_START_BATCH_DML = re.compile(r"^\s*(START)\s+(BATCH)\s+(DML)", re.IGNORECASE) +RE_RUN_BATCH = re.compile(r"^\s*(RUN)\s+(BATCH)", re.IGNORECASE) +RE_ABORT_BATCH = re.compile(r"^\s*(ABORT)\s+(BATCH)", re.IGNORECASE) +RE_PARTITION_QUERY = re.compile(r"^\s*(PARTITION)\s+(.+)", re.IGNORECASE) +RE_RUN_PARTITION = re.compile(r"^\s*(RUN)\s+(PARTITION)\s+(.+)", re.IGNORECASE) + + +def parse_stmt(query): + """Parses the sql query to check if it matches with any of the client side + statement regex. + + It is an internal method that can make backwards-incompatible changes. + + :type query: str + :param query: sql query + + :rtype: ParsedStatement + :returns: ParsedStatement object. + """ + client_side_statement_type = None + client_side_statement_params = [] + if RE_COMMIT.match(query): + client_side_statement_type = ClientSideStatementType.COMMIT + if RE_BEGIN.match(query): + client_side_statement_type = ClientSideStatementType.BEGIN + if RE_ROLLBACK.match(query): + client_side_statement_type = ClientSideStatementType.ROLLBACK + if RE_SHOW_COMMIT_TIMESTAMP.match(query): + client_side_statement_type = ClientSideStatementType.SHOW_COMMIT_TIMESTAMP + if RE_SHOW_READ_TIMESTAMP.match(query): + client_side_statement_type = ClientSideStatementType.SHOW_READ_TIMESTAMP + if RE_START_BATCH_DML.match(query): + client_side_statement_type = ClientSideStatementType.START_BATCH_DML + if RE_RUN_BATCH.match(query): + client_side_statement_type = ClientSideStatementType.RUN_BATCH + if RE_ABORT_BATCH.match(query): + client_side_statement_type = ClientSideStatementType.ABORT_BATCH + if RE_PARTITION_QUERY.match(query): + match = re.search(RE_PARTITION_QUERY, query) + client_side_statement_params.append(match.group(2)) + client_side_statement_type = ClientSideStatementType.PARTITION_QUERY + if RE_RUN_PARTITION.match(query): + match = re.search(RE_RUN_PARTITION, query) + client_side_statement_params.append(match.group(3)) + client_side_statement_type = ClientSideStatementType.RUN_PARTITION + if client_side_statement_type is not None: + return ParsedStatement( + StatementType.CLIENT_SIDE, + Statement(query), + client_side_statement_type, + client_side_statement_params, + ) + return None diff --git a/google/cloud/spanner_dbapi/connection.py b/google/cloud/spanner_dbapi/connection.py index efbdc80f3f..47680fd550 100644 --- a/google/cloud/spanner_dbapi/connection.py +++ b/google/cloud/spanner_dbapi/connection.py @@ -13,28 +13,43 @@ # limitations under the License. 
"""DB-API Connection for the Google Cloud Spanner.""" - import time import warnings from google.api_core.exceptions import Aborted from google.api_core.gapic_v1.client_info import ClientInfo from google.cloud import spanner_v1 as spanner +from google.cloud.spanner_dbapi import partition_helper +from google.cloud.spanner_dbapi.batch_dml_executor import BatchMode, BatchDmlExecutor +from google.cloud.spanner_dbapi.parse_utils import _get_statement_type +from google.cloud.spanner_dbapi.parsed_statement import ( + ParsedStatement, + Statement, + StatementType, +) +from google.cloud.spanner_dbapi.partition_helper import PartitionId from google.cloud.spanner_v1 import RequestOptions from google.cloud.spanner_v1.session import _get_retry_delay from google.cloud.spanner_v1.snapshot import Snapshot +from deprecated import deprecated from google.cloud.spanner_dbapi.checksum import _compare_checksums from google.cloud.spanner_dbapi.checksum import ResultsChecksum from google.cloud.spanner_dbapi.cursor import Cursor -from google.cloud.spanner_dbapi.exceptions import InterfaceError, OperationalError +from google.cloud.spanner_dbapi.exceptions import ( + InterfaceError, + OperationalError, + ProgrammingError, +) from google.cloud.spanner_dbapi.version import DEFAULT_USER_AGENT from google.cloud.spanner_dbapi.version import PY_VERSION from google.rpc.code_pb2 import ABORTED -AUTOCOMMIT_MODE_WARNING = "This method is non-operational in autocommit mode" +CLIENT_TRANSACTION_NOT_STARTED_WARNING = ( + "This method is non-operational as a transaction has not been started." +) MAX_INTERNAL_RETRIES = 50 @@ -104,6 +119,12 @@ def __init__(self, instance, database=None, read_only=False): self._read_only = read_only self._staleness = None self.request_priority = None + self._transaction_begin_marked = False + # whether transaction started at Spanner. This means that we had + # made atleast one call to Spanner. + self._spanner_transaction_started = False + self._batch_mode = BatchMode.NONE + self._batch_dml_executor: BatchDmlExecutor = None @property def autocommit(self): @@ -122,7 +143,7 @@ def autocommit(self, value): :type value: bool :param value: New autocommit mode state. """ - if value and not self._autocommit and self.inside_transaction: + if value and not self._autocommit and self._spanner_transaction_started: self.commit() self._autocommit = value @@ -137,18 +158,25 @@ def database(self): return self._database @property + @deprecated( + reason="This method is deprecated. Use _spanner_transaction_started field" + ) def inside_transaction(self): - """Flag: transaction is started. - - Returns: - bool: True if transaction begun, False otherwise. - """ return ( self._transaction and not self._transaction.committed and not self._transaction.rolled_back ) + @property + def _client_transaction_started(self): + """Flag: whether transaction started at client side. + + Returns: + bool: True if transaction started, False otherwise. + """ + return (not self._autocommit) or self._transaction_begin_marked + @property def instance(self): """Instance to which this connection relates. @@ -175,7 +203,7 @@ def read_only(self, value): Args: value (bool): True for ReadOnly mode, False for ReadWrite. """ - if self.inside_transaction: + if self._spanner_transaction_started: raise ValueError( "Connection read/write mode can't be changed while a transaction is in progress. " "Commit or rollback the current transaction and try again." @@ -213,7 +241,7 @@ def staleness(self, value): Args: value (dict): Staleness type and value. 
""" - if self.inside_transaction: + if self._spanner_transaction_started: raise ValueError( "`staleness` option can't be changed while a transaction is in progress. " "Commit or rollback the current transaction and try again." @@ -256,7 +284,8 @@ def _release_session(self): """ if self.database is None: raise ValueError("Database needs to be passed for this operation") - self.database._pool.put(self._session) + if self._session is not None: + self.database._pool.put(self._session) self._session = None def retry_transaction(self): @@ -272,7 +301,7 @@ def retry_transaction(self): """ attempt = 0 while True: - self._transaction = None + self._spanner_transaction_started = False attempt += 1 if attempt > MAX_INTERNAL_RETRIES: raise @@ -295,10 +324,12 @@ def _rerun_previous_statements(self): statements, checksum = statement transaction = self.transaction_checkout() - status, res = transaction.batch_update(statements) + statements_tuple = [] + for single_statement in statements: + statements_tuple.append(single_statement.get_tuple()) + status, res = transaction.batch_update(statements_tuple) if status.code == ABORTED: - self.connection._transaction = None raise Aborted(status.details) retried_checksum = ResultsChecksum() @@ -331,16 +362,19 @@ def transaction_checkout(self): """Get a Cloud Spanner transaction. Begin a new transaction, if there is no transaction in - this connection yet. Return the begun one otherwise. + this connection yet. Return the started one otherwise. - The method is non operational in autocommit mode. + This method is a no-op if the connection is in autocommit mode and no + explicit transaction has been started :rtype: :class:`google.cloud.spanner_v1.transaction.Transaction` :returns: A Cloud Spanner transaction object, ready to use. """ - if not self.autocommit: - if not self.inside_transaction: + if not self.read_only and self._client_transaction_started: + if not self._spanner_transaction_started: self._transaction = self._session_checkout().transaction() + self._snapshot = None + self._spanner_transaction_started = True self._transaction.begin() return self._transaction @@ -354,12 +388,14 @@ def snapshot_checkout(self): :rtype: :class:`google.cloud.spanner_v1.snapshot.Snapshot` :returns: A Cloud Spanner snapshot object, ready to use. """ - if self.read_only and not self.autocommit: - if not self._snapshot: + if self.read_only and self._client_transaction_started: + if not self._spanner_transaction_started: self._snapshot = Snapshot( self._session_checkout(), multi_use=True, **self.staleness ) + self._transaction = None self._snapshot.begin() + self._spanner_transaction_started = True return self._snapshot @@ -369,7 +405,7 @@ def close(self): The connection will be unusable from this point forward. If the connection has an active transaction, it will be rolled back. """ - if self.inside_transaction: + if self._spanner_transaction_started and not self._read_only: self._transaction.rollback() if self._own_pool and self.database: @@ -377,47 +413,70 @@ def close(self): self.is_closed = True + @check_not_closed + def begin(self): + """ + Marks the transaction as started. + + :raises: :class:`InterfaceError`: if this connection is closed. 
+ :raises: :class:`OperationalError`: if there is an existing transaction + that has been started + """ + if self._transaction_begin_marked: + raise OperationalError("A transaction has already started") + if self._spanner_transaction_started: + raise OperationalError( + "Beginning a new transaction is not allowed when a transaction " + "is already running" + ) + self._transaction_begin_marked = True + def commit(self): """Commits any pending transaction to the database. - This method is non-operational in autocommit mode. + This is a no-op if there is no active client transaction. """ if self.database is None: raise ValueError("Database needs to be passed for this operation") - self._snapshot = None - if self._autocommit: - warnings.warn(AUTOCOMMIT_MODE_WARNING, UserWarning, stacklevel=2) + if not self._client_transaction_started: + warnings.warn( + CLIENT_TRANSACTION_NOT_STARTED_WARNING, UserWarning, stacklevel=2 + ) return self.run_prior_DDL_statements() - if self.inside_transaction: - try: - if not self.read_only: - self._transaction.commit() - - self._release_session() - self._statements = [] - except Aborted: - self.retry_transaction() - self.commit() + try: + if self._spanner_transaction_started and not self._read_only: + self._transaction.commit() + except Aborted: + self.retry_transaction() + self.commit() + finally: + self._release_session() + self._statements = [] + self._transaction_begin_marked = False + self._spanner_transaction_started = False def rollback(self): """Rolls back any pending transaction. - This is a no-op if there is no active transaction or if the connection - is in autocommit mode. + This is a no-op if there is no active client transaction. """ - self._snapshot = None + if not self._client_transaction_started: + warnings.warn( + CLIENT_TRANSACTION_NOT_STARTED_WARNING, UserWarning, stacklevel=2 + ) + return - if self._autocommit: - warnings.warn(AUTOCOMMIT_MODE_WARNING, UserWarning, stacklevel=2) - elif self._transaction: - if not self.read_only: + try: + if self._spanner_transaction_started and not self._read_only: self._transaction.rollback() - + finally: self._release_session() self._statements = [] + self._transaction_begin_marked = False + self._spanner_transaction_started = False @check_not_closed def cursor(self): @@ -434,14 +493,14 @@ def run_prior_DDL_statements(self): return self.database.update_ddl(ddl_statements).result() - def run_statement(self, statement, retried=False): + def run_statement(self, statement: Statement, retried=False): """Run single SQL statement in begun transaction. This method is never used in autocommit mode. In !autocommit mode however it remembers every executed SQL statement with its parameters. - :type statement: :class:`dict` + :type statement: :class:`Statement` :param statement: SQL statement to execute. 
:type retried: bool @@ -492,6 +551,95 @@ def validate(self): "Expected: [[1]]" % result ) + @check_not_closed + def start_batch_dml(self, cursor): + if self._batch_mode is not BatchMode.NONE: + raise ProgrammingError( + "Cannot start a DML batch when a batch is already active" + ) + if self.read_only: + raise ProgrammingError( + "Cannot start a DML batch when the connection is in read-only mode" + ) + self._batch_mode = BatchMode.DML + self._batch_dml_executor = BatchDmlExecutor(cursor) + + @check_not_closed + def execute_batch_dml_statement(self, parsed_statement: ParsedStatement): + if self._batch_mode is not BatchMode.DML: + raise ProgrammingError( + "Cannot execute statement when the BatchMode is not DML" + ) + self._batch_dml_executor.execute_statement(parsed_statement) + + @check_not_closed + def run_batch(self): + if self._batch_mode is BatchMode.NONE: + raise ProgrammingError("Cannot run a batch when the BatchMode is not set") + try: + if self._batch_mode is BatchMode.DML: + many_result_set = self._batch_dml_executor.run_batch_dml() + finally: + self._batch_mode = BatchMode.NONE + self._batch_dml_executor = None + return many_result_set + + @check_not_closed + def abort_batch(self): + if self._batch_mode is BatchMode.NONE: + raise ProgrammingError("Cannot abort a batch when the BatchMode is not set") + if self._batch_mode is BatchMode.DML: + self._batch_dml_executor = None + self._batch_mode = BatchMode.NONE + + @check_not_closed + def partition_query( + self, + parsed_statement: ParsedStatement, + query_options=None, + ): + statement = parsed_statement.statement + partitioned_query = parsed_statement.client_side_statement_params[0] + if _get_statement_type(Statement(partitioned_query)) is not StatementType.QUERY: + raise ProgrammingError( + "Only queries can be partitioned. 
Invalid statement: " + statement.sql + ) + if self.read_only is not True and self._client_transaction_started is True: + raise ProgrammingError( + "Partitioned query not supported as the connection is not in " + "read only mode or ReadWrite transaction started" + ) + + batch_snapshot = self._database.batch_snapshot() + partition_ids = [] + partitions = list( + batch_snapshot.generate_query_batches( + partitioned_query, + statement.params, + statement.param_types, + query_options=query_options, + ) + ) + for partition in partitions: + batch_transaction_id = batch_snapshot.get_batch_transaction_id() + partition_ids.append( + partition_helper.encode_to_string(batch_transaction_id, partition) + ) + return partition_ids + + @check_not_closed + def run_partition(self, batch_transaction_id): + partition_id: PartitionId = partition_helper.decode_from_string( + batch_transaction_id + ) + batch_transaction_id = partition_id.batch_transaction_id + batch_snapshot = self._database.batch_snapshot( + read_timestamp=batch_transaction_id.read_timestamp, + session_id=batch_transaction_id.session_id, + transaction_id=batch_transaction_id.transaction_id, + ) + return batch_snapshot.process(partition_id.partition_result) + def __enter__(self): return self diff --git a/google/cloud/spanner_dbapi/cursor.py b/google/cloud/spanner_dbapi/cursor.py index 91bccedd4c..ff91e9e666 100644 --- a/google/cloud/spanner_dbapi/cursor.py +++ b/google/cloud/spanner_dbapi/cursor.py @@ -26,28 +26,33 @@ from google.api_core.exceptions import OutOfRange from google.cloud import spanner_v1 as spanner -from google.cloud.spanner_dbapi.checksum import ResultsChecksum +from google.cloud.spanner_dbapi.batch_dml_executor import BatchMode from google.cloud.spanner_dbapi.exceptions import IntegrityError from google.cloud.spanner_dbapi.exceptions import InterfaceError from google.cloud.spanner_dbapi.exceptions import OperationalError from google.cloud.spanner_dbapi.exceptions import ProgrammingError -from google.cloud.spanner_dbapi import _helpers +from google.cloud.spanner_dbapi import ( + _helpers, + client_side_statement_executor, + batch_dml_executor, +) from google.cloud.spanner_dbapi._helpers import ColumnInfo from google.cloud.spanner_dbapi._helpers import CODE_TO_DISPLAY_SIZE from google.cloud.spanner_dbapi import parse_utils from google.cloud.spanner_dbapi.parse_utils import get_param_types -from google.cloud.spanner_dbapi.parse_utils import sql_pyformat_args_to_spanner +from google.cloud.spanner_dbapi.parsed_statement import ( + StatementType, + Statement, + ParsedStatement, +) from google.cloud.spanner_dbapi.utils import PeekIterator from google.cloud.spanner_dbapi.utils import StreamedManyResultSets -from google.rpc.code_pb2 import ABORTED, OK - _UNSET_COUNT = -1 ColumnDetails = namedtuple("column_details", ["null_ok", "spanner_type"]) -Statement = namedtuple("Statement", "sql, params, param_types, checksum") def check_not_closed(function): @@ -177,24 +182,16 @@ def close(self): """Closes this cursor.""" self._is_closed = True - def _do_execute_update(self, transaction, sql, params): + def _do_execute_update_in_autocommit(self, transaction, sql, params): + """This function should only be used in autocommit mode.""" + self.connection._transaction = transaction + self.connection._snapshot = None self._result_set = transaction.execute_sql( sql, params=params, param_types=get_param_types(params) ) self._itr = PeekIterator(self._result_set) self._row_count = _UNSET_COUNT - def _do_batch_update(self, transaction, statements, 
many_result_set): - status, res = transaction.batch_update(statements) - many_result_set.add_iter(res) - - if status.code == ABORTED: - raise Aborted(status.message) - elif status.code != OK: - raise OperationalError(status.message) - - self._row_count = sum([max(val, 0) for val in res]) - def _batch_DDLs(self, sql): """ Check that the given operation contains only DDL @@ -210,7 +207,10 @@ def _batch_DDLs(self, sql): for ddl in sqlparse.split(sql): if ddl: ddl = ddl.rstrip(";") - if parse_utils.classify_stmt(ddl) != parse_utils.STMT_DDL: + if ( + parse_utils.classify_statement(ddl).statement_type + != StatementType.DDL + ): raise ValueError("Only DDL statements may be batched.") statements.append(ddl) @@ -235,61 +235,67 @@ def execute(self, sql, args=None): self._row_count = _UNSET_COUNT try: - if self.connection.read_only: - self._handle_DQL(sql, args or None) - return - - class_ = parse_utils.classify_stmt(sql) - if class_ == parse_utils.STMT_DDL: - self._batch_DDLs(sql) - if self.connection.autocommit: - self.connection.run_prior_DDL_statements() - return - - # For every other operation, we've got to ensure that - # any prior DDL statements were run. - # self._run_prior_DDL_statements() - self.connection.run_prior_DDL_statements() - - if class_ == parse_utils.STMT_UPDATING: - sql = parse_utils.ensure_where_clause(sql) - - sql, args = sql_pyformat_args_to_spanner(sql, args or None) - - if not self.connection.autocommit: - statement = Statement( - sql, - args, - get_param_types(args or None), - ResultsChecksum(), + parsed_statement: ParsedStatement = parse_utils.classify_statement( + sql, args + ) + if parsed_statement.statement_type == StatementType.CLIENT_SIDE: + self._result_set = client_side_statement_executor.execute( + self, parsed_statement ) - - ( - self._result_set, - self._checksum, - ) = self.connection.run_statement(statement) - while True: - try: + if self._result_set is not None: + if isinstance(self._result_set, StreamedManyResultSets): + self._itr = self._result_set + else: self._itr = PeekIterator(self._result_set) - break - except Aborted: - self.connection.retry_transaction() - return - - if class_ == parse_utils.STMT_NON_UPDATING: + elif self.connection._batch_mode == BatchMode.DML: + self.connection.execute_batch_dml_statement(parsed_statement) + elif self.connection.read_only or ( + not self.connection._client_transaction_started + and parsed_statement.statement_type == StatementType.QUERY + ): self._handle_DQL(sql, args or None) + elif parsed_statement.statement_type == StatementType.DDL: + self._batch_DDLs(sql) + if not self.connection._client_transaction_started: + self.connection.run_prior_DDL_statements() else: - self.connection.database.run_in_transaction( - self._do_execute_update, - sql, - args or None, - ) + self._execute_in_rw_transaction(parsed_statement) + except (AlreadyExists, FailedPrecondition, OutOfRange) as e: raise IntegrityError(getattr(e, "details", e)) from e except InvalidArgument as e: raise ProgrammingError(getattr(e, "details", e)) from e except InternalServerError as e: raise OperationalError(getattr(e, "details", e)) from e + finally: + if self.connection._client_transaction_started is False: + self.connection._spanner_transaction_started = False + + def _execute_in_rw_transaction(self, parsed_statement: ParsedStatement): + # For every other operation, we've got to ensure that + # any prior DDL statements were run. 
+ self.connection.run_prior_DDL_statements() + if self.connection._client_transaction_started: + ( + self._result_set, + self._checksum, + ) = self.connection.run_statement(parsed_statement.statement) + + while True: + try: + self._itr = PeekIterator(self._result_set) + break + except Aborted: + self.connection.retry_transaction() + except Exception as ex: + self.connection._statements.remove(parsed_statement.statement) + raise ex + else: + self.connection.database.run_in_transaction( + self._do_execute_update_in_autocommit, + parsed_statement.statement.sql, + parsed_statement.statement.params or None, + ) @check_not_closed def executemany(self, operation, seq_of_params): @@ -309,58 +315,35 @@ def executemany(self, operation, seq_of_params): self._result_set = None self._row_count = _UNSET_COUNT - class_ = parse_utils.classify_stmt(operation) - if class_ == parse_utils.STMT_DDL: + parsed_statement = parse_utils.classify_statement(operation) + if parsed_statement.statement_type == StatementType.DDL: raise ProgrammingError( "Executing DDL statements with executemany() method is not allowed." ) - many_result_set = StreamedManyResultSets() + if parsed_statement.statement_type == StatementType.CLIENT_SIDE: + raise ProgrammingError( + "Executing the following operation: " + + operation + + ", with executemany() method is not allowed." + ) - if class_ in (parse_utils.STMT_INSERT, parse_utils.STMT_UPDATING): + # For every operation, we've got to ensure that any prior DDL + # statements were run. + self.connection.run_prior_DDL_statements() + if parsed_statement.statement_type in ( + StatementType.INSERT, + StatementType.UPDATE, + ): statements = [] - for params in seq_of_params: sql, params = parse_utils.sql_pyformat_args_to_spanner( operation, params ) - statements.append((sql, params, get_param_types(params))) - - if self.connection.autocommit: - self.connection.database.run_in_transaction( - self._do_batch_update, statements, many_result_set - ) - else: - retried = False - total_row_count = 0 - while True: - try: - transaction = self.connection.transaction_checkout() - - res_checksum = ResultsChecksum() - if not retried: - self.connection._statements.append( - (statements, res_checksum) - ) - - status, res = transaction.batch_update(statements) - many_result_set.add_iter(res) - res_checksum.consume_result(res) - res_checksum.consume_result(status.code) - total_row_count += sum([max(val, 0) for val in res]) - - if status.code == ABORTED: - self.connection._transaction = None - raise Aborted(status.message) - elif status.code != OK: - raise OperationalError(status.message) - self._row_count = total_row_count - break - except Aborted: - self.connection.retry_transaction() - retried = True - + statements.append(Statement(sql, params, get_param_types(params))) + many_result_set = batch_dml_executor.run_batch_dml(self, statements) else: + many_result_set = StreamedManyResultSets() for params in seq_of_params: self.execute(operation, params) many_result_set.add_iter(self._itr) @@ -374,7 +357,10 @@ def fetchone(self): sequence, or None when no more data is available.""" try: res = next(self) - if not self.connection.autocommit and not self.connection.read_only: + if ( + self.connection._client_transaction_started + and not self.connection.read_only + ): self._checksum.consume_result(res) return res except StopIteration: @@ -392,7 +378,10 @@ def fetchall(self): res = [] try: for row in self: - if not self.connection.autocommit and not self.connection.read_only: + if ( + 
self.connection._client_transaction_started + and not self.connection.read_only + ): self._checksum.consume_result(row) res.append(row) except Aborted: @@ -421,7 +410,10 @@ def fetchmany(self, size=None): for _ in range(size): try: res = next(self) - if not self.connection.autocommit and not self.connection.read_only: + if ( + self.connection._client_transaction_started + and not self.connection.read_only + ): self._checksum.consume_result(res) items.append(res) except StopIteration: @@ -446,12 +438,16 @@ def _handle_DQL_with_snapshot(self, snapshot, sql, params): # Unfortunately, Spanner doesn't seem to send back # information about the number of rows available. self._row_count = _UNSET_COUNT + if self._result_set.metadata.transaction.read_timestamp is not None: + snapshot._transaction_read_timestamp = ( + self._result_set.metadata.transaction.read_timestamp + ) def _handle_DQL(self, sql, params): if self.connection.database is None: raise ValueError("Database needs to be passed for this operation") sql, params = parse_utils.sql_pyformat_args_to_spanner(sql, params) - if self.connection.read_only and not self.connection.autocommit: + if self.connection.read_only and self.connection._client_transaction_started: # initiate or use the existing multi-use snapshot self._handle_DQL_with_snapshot( self.connection.snapshot_checkout(), sql, params @@ -461,6 +457,8 @@ def _handle_DQL(self, sql, params): with self.connection.database.snapshot( **self.connection.staleness ) as snapshot: + self.connection._snapshot = snapshot + self.connection._transaction = None self._handle_DQL_with_snapshot(snapshot, sql, params) def __enter__(self): diff --git a/google/cloud/spanner_dbapi/parse_utils.py b/google/cloud/spanner_dbapi/parse_utils.py index 84cb2dc7a5..008f21bf93 100644 --- a/google/cloud/spanner_dbapi/parse_utils.py +++ b/google/cloud/spanner_dbapi/parse_utils.py @@ -21,8 +21,12 @@ import sqlparse from google.cloud import spanner_v1 as spanner from google.cloud.spanner_v1 import JsonObject +from . import client_side_statement_parser +from deprecated import deprecated +from .checksum import ResultsChecksum from .exceptions import Error +from .parsed_statement import ParsedStatement, StatementType, Statement from .types import DateStr, TimestampStr from .utils import sanitize_literals_for_upload @@ -174,12 +178,11 @@ RE_PYFORMAT = re.compile(r"(%s|%\([^\(\)]+\)s)+", re.DOTALL) +@deprecated(reason="This method is deprecated. Use _classify_stmt method") def classify_stmt(query): """Determine SQL query type. - :type query: str :param query: A SQL query. - :rtype: str :returns: The query type name. """ @@ -203,6 +206,51 @@ def classify_stmt(query): return STMT_UPDATING +def classify_statement(query, args=None): + """Determine SQL query type. + + It is an internal method that can make backwards-incompatible changes. + + :type query: str + :param query: A SQL query. + + :rtype: ParsedStatement + :returns: parsed statement attributes. + """ + # sqlparse will strip Cloud Spanner comments, + # still, special commenting styles, like + # PostgreSQL dollar quoted comments are not + # supported and will not be stripped. 
+ query = sqlparse.format(query, strip_comments=True).strip() + parsed_statement: ParsedStatement = client_side_statement_parser.parse_stmt(query) + if parsed_statement is not None: + return parsed_statement + query, args = sql_pyformat_args_to_spanner(query, args or None) + statement = Statement( + query, + args, + get_param_types(args or None), + ResultsChecksum(), + ) + statement_type = _get_statement_type(statement) + return ParsedStatement(statement_type, statement) + + +def _get_statement_type(statement): + query = statement.sql + if RE_DDL.match(query): + return StatementType.DDL + if RE_IS_INSERT.match(query): + return StatementType.INSERT + if RE_NON_UPDATE.match(query) or RE_WITH.match(query): + # As of 13-March-2020, Cloud Spanner only supports WITH for DQL + # statements and doesn't yet support WITH for DML statements. + return StatementType.QUERY + + statement.sql = ensure_where_clause(query) + return StatementType.UPDATE + + def sql_pyformat_args_to_spanner(sql, params): """ Transform pyformat set SQL to named arguments for Cloud Spanner. diff --git a/google/cloud/spanner_dbapi/parsed_statement.py b/google/cloud/spanner_dbapi/parsed_statement.py new file mode 100644 index 0000000000..798f5126c3 --- /dev/null +++ b/google/cloud/spanner_dbapi/parsed_statement.py @@ -0,0 +1,58 @@ +# Copyright 2023 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from dataclasses import dataclass +from enum import Enum +from typing import Any, List + +from google.cloud.spanner_dbapi.checksum import ResultsChecksum + + +class StatementType(Enum): + CLIENT_SIDE = 1 + DDL = 2 + QUERY = 3 + UPDATE = 4 + INSERT = 5 + + +class ClientSideStatementType(Enum): + COMMIT = 1 + BEGIN = 2 + ROLLBACK = 3 + SHOW_COMMIT_TIMESTAMP = 4 + SHOW_READ_TIMESTAMP = 5 + START_BATCH_DML = 6 + RUN_BATCH = 7 + ABORT_BATCH = 8 + PARTITION_QUERY = 9 + RUN_PARTITION = 10 + + +@dataclass +class Statement: + sql: str + params: Any = None + param_types: Any = None + checksum: ResultsChecksum = None + + def get_tuple(self): + return self.sql, self.params, self.param_types + + +@dataclass +class ParsedStatement: + statement_type: StatementType + statement: Statement + client_side_statement_type: ClientSideStatementType = None + client_side_statement_params: List[Any] = None diff --git a/google/cloud/spanner_dbapi/partition_helper.py b/google/cloud/spanner_dbapi/partition_helper.py new file mode 100644 index 0000000000..94b396c801 --- /dev/null +++ b/google/cloud/spanner_dbapi/partition_helper.py @@ -0,0 +1,46 @@ +# Copyright 2023 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Any + +import gzip +import pickle +import base64 + + +def decode_from_string(encoded_partition_id): + gzip_bytes = base64.b64decode(bytes(encoded_partition_id, "utf-8")) + partition_id_bytes = gzip.decompress(gzip_bytes) + return pickle.loads(partition_id_bytes) + + +def encode_to_string(batch_transaction_id, partition_result): + partition_id = PartitionId(batch_transaction_id, partition_result) + partition_id_bytes = pickle.dumps(partition_id) + gzip_bytes = gzip.compress(partition_id_bytes) + return str(base64.b64encode(gzip_bytes), "utf-8") + + +@dataclass +class BatchTransactionId: + transaction_id: str + session_id: str + read_timestamp: Any + + +@dataclass +class PartitionId: + batch_transaction_id: BatchTransactionId + partition_result: Any diff --git a/google/cloud/spanner_v1/__init__.py b/google/cloud/spanner_v1/__init__.py index 039919563f..47805d4ebc 100644 --- a/google/cloud/spanner_v1/__init__.py +++ b/google/cloud/spanner_v1/__init__.py @@ -34,10 +34,13 @@ from .types.result_set import ResultSetStats from .types.spanner import BatchCreateSessionsRequest from .types.spanner import BatchCreateSessionsResponse +from .types.spanner import BatchWriteRequest +from .types.spanner import BatchWriteResponse from .types.spanner import BeginTransactionRequest from .types.spanner import CommitRequest from .types.spanner import CreateSessionRequest from .types.spanner import DeleteSessionRequest +from .types.spanner import DirectedReadOptions from .types.spanner import ExecuteBatchDmlRequest from .types.spanner import ExecuteBatchDmlResponse from .types.spanner import ExecuteSqlRequest @@ -99,11 +102,14 @@ # google.cloud.spanner_v1.types "BatchCreateSessionsRequest", "BatchCreateSessionsResponse", + "BatchWriteRequest", + "BatchWriteResponse", "BeginTransactionRequest", "CommitRequest", "CommitResponse", "CreateSessionRequest", "DeleteSessionRequest", + "DirectedReadOptions", "ExecuteBatchDmlRequest", "ExecuteBatchDmlResponse", "ExecuteSqlRequest", diff --git a/google/cloud/spanner_v1/batch.py b/google/cloud/spanner_v1/batch.py index 41e4460c30..da74bf35f0 100644 --- a/google/cloud/spanner_v1/batch.py +++ b/google/cloud/spanner_v1/batch.py @@ -18,6 +18,7 @@ from google.cloud.spanner_v1 import CommitRequest from google.cloud.spanner_v1 import Mutation from google.cloud.spanner_v1 import TransactionOptions +from google.cloud.spanner_v1 import BatchWriteRequest from google.cloud.spanner_v1._helpers import _SessionWrapper from google.cloud.spanner_v1._helpers import _make_list_value_pbs @@ -215,6 +216,99 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.commit() +class MutationGroup(_BatchBase): + """A container for mutations. + + Clients should use :class:`~google.cloud.spanner_v1.MutationGroups` to + obtain instances instead of directly creating instances. + + :type session: :class:`~google.cloud.spanner_v1.session.Session` + :param session: The session used to perform the commit. + + :type mutations: list + :param mutations: The list into which mutations are to be accumulated. + """ + + def __init__(self, session, mutations=[]): + super(MutationGroup, self).__init__(session) + self._mutations = mutations + + +class MutationGroups(_SessionWrapper): + """Accumulate mutation groups for transmission during :meth:`batch_write`. 
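A hedged usage sketch of the mutation-group API introduced here: `database` stands for an existing spanner_v1 Database handle, the Singers table and its columns are illustrative, and `mutation_groups()` is the checkout helper added to database.py further down in this diff.

# Illustrative only; `database` is assumed to be a spanner_v1 Database instance.
with database.mutation_groups() as groups:
    group1 = groups.group()
    group1.insert("Singers", columns=("SingerId", "FirstName"), values=[(1, "Marc")])
    group2 = groups.group()
    group2.update("Singers", columns=("SingerId", "FirstName"), values=[(2, "Cat")])
    # batch_write() sends all accumulated groups in a single BatchWrite RPC and
    # yields one BatchWriteResponse per batch of applied groups.
    for response in groups.batch_write():
        print(response.indexes, response.status)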
+ + :type session: :class:`~google.cloud.spanner_v1.session.Session` + :param session: the session used to perform the commit + """ + + committed = None + + def __init__(self, session): + super(MutationGroups, self).__init__(session) + self._mutation_groups = [] + + def _check_state(self): + """Checks if the object's state is valid for making API requests. + + :raises: :exc:`ValueError` if the object's state is invalid for making + API requests. + """ + if self.committed is not None: + raise ValueError("MutationGroups already committed") + + def group(self): + """Returns a new `MutationGroup` to which mutations can be added.""" + mutation_group = BatchWriteRequest.MutationGroup() + self._mutation_groups.append(mutation_group) + return MutationGroup(self._session, mutation_group.mutations) + + def batch_write(self, request_options=None): + """Executes batch_write. + + :type request_options: + :class:`google.cloud.spanner_v1.types.RequestOptions` + :param request_options: + (Optional) Common options for this request. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.spanner_v1.types.RequestOptions`. + + :rtype: :class:`Iterable[google.cloud.spanner_v1.types.BatchWriteResponse]` + :returns: a sequence of responses for each batch. + """ + self._check_state() + + database = self._session._database + api = database.spanner_api + metadata = _metadata_with_prefix(database.name) + if database._route_to_leader_enabled: + metadata.append( + _metadata_with_leader_aware_routing(database._route_to_leader_enabled) + ) + trace_attributes = {"num_mutation_groups": len(self._mutation_groups)} + if request_options is None: + request_options = RequestOptions() + elif type(request_options) is dict: + request_options = RequestOptions(request_options) + + request = BatchWriteRequest( + session=self._session.name, + mutation_groups=self._mutation_groups, + request_options=request_options, + ) + with trace_call("CloudSpanner.BatchWrite", self._session, trace_attributes): + method = functools.partial( + api.batch_write, + request=request, + metadata=metadata, + ) + response = _retry( + method, + allowed_exceptions={InternalServerError: _check_rst_stream_error}, + ) + self.committed = True + return response + + def _make_write_pb(table, columns, values): """Helper for :meth:`Batch.insert` et al. diff --git a/google/cloud/spanner_v1/client.py b/google/cloud/spanner_v1/client.py index a0e848228b..f8f3fdb72c 100644 --- a/google/cloud/spanner_v1/client.py +++ b/google/cloud/spanner_v1/client.py @@ -120,6 +120,12 @@ class Client(ClientWithProject): disable leader aware routing. Disabling leader aware routing would route all requests in RW/PDML transactions to the closest region. + :type directed_read_options: :class:`~google.cloud.spanner_v1.DirectedReadOptions` + or :class:`dict` + :param directed_read_options: (Optional) Client options used to set the directed_read_options + for all ReadRequests and ExecuteSqlRequests that indicates which replicas + or regions should be used for non-transactional reads or queries. 
+ :raises: :class:`ValueError ` if both ``read_only`` and ``admin`` are :data:`True` """ @@ -139,6 +145,7 @@ def __init__( client_options=None, query_options=None, route_to_leader_enabled=True, + directed_read_options=None, ): self._emulator_host = _get_spanner_emulator_host() @@ -179,6 +186,7 @@ def __init__( warnings.warn(_EMULATOR_HOST_HTTP_SCHEME) self._route_to_leader_enabled = route_to_leader_enabled + self._directed_read_options = directed_read_options @property def credentials(self): @@ -260,6 +268,17 @@ def route_to_leader_enabled(self): """ return self._route_to_leader_enabled + @property + def directed_read_options(self): + """Getter for directed_read_options. + + :rtype: + :class:`~google.cloud.spanner_v1.DirectedReadOptions` + or :class:`dict` + :returns: The directed_read_options for the client. + """ + return self._directed_read_options + def copy(self): """Make a copy of this client. @@ -383,3 +402,14 @@ def list_instances(self, filter_="", page_size=None): request=request, metadata=metadata ) return page_iter + + @directed_read_options.setter + def directed_read_options(self, directed_read_options): + """Sets directed_read_options for the client + :type directed_read_options: :class:`~google.cloud.spanner_v1.DirectedReadOptions` + or :class:`dict` + :param directed_read_options: Client options used to set the directed_read_options + for all ReadRequests and ExecuteSqlRequests that indicates which replicas + or regions should be used for non-transactional reads or queries. + """ + self._directed_read_options = directed_read_options diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index 1d211f7d6d..c8c3b92edc 100644 --- a/google/cloud/spanner_v1/database.py +++ b/google/cloud/spanner_v1/database.py @@ -16,6 +16,7 @@ import copy import functools + import grpc import logging import re @@ -39,6 +40,7 @@ from google.cloud.spanner_admin_database_v1 import RestoreDatabaseRequest from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest from google.cloud.spanner_admin_database_v1.types import DatabaseDialect +from google.cloud.spanner_dbapi.partition_helper import BatchTransactionId from google.cloud.spanner_v1 import ExecuteSqlRequest from google.cloud.spanner_v1 import TransactionSelector from google.cloud.spanner_v1 import TransactionOptions @@ -50,6 +52,7 @@ _metadata_with_leader_aware_routing, ) from google.cloud.spanner_v1.batch import Batch +from google.cloud.spanner_v1.batch import MutationGroups from google.cloud.spanner_v1.keyset import KeySet from google.cloud.spanner_v1.pool import BurstyPool from google.cloud.spanner_v1.pool import SessionCheckout @@ -166,6 +169,7 @@ def __init__( self._route_to_leader_enabled = self._instance._client.route_to_leader_enabled self._enable_drop_protection = enable_drop_protection self._reconciling = False + self._directed_read_options = self._instance._client.directed_read_options if pool is None: pool = BurstyPool(database_role=database_role) @@ -648,7 +652,6 @@ def execute_partitioned_dml( def execute_pdml(): with SessionCheckout(self._pool) as session: - txn = api.begin_transaction( session=session.name, options=txn_options, metadata=metadata ) @@ -735,7 +738,24 @@ def batch(self, request_options=None): """ return BatchCheckout(self, request_options) - def batch_snapshot(self, read_timestamp=None, exact_staleness=None): + def mutation_groups(self): + """Return an object which wraps a mutation_group. 
+ + The wrapper *must* be used as a context manager, with the mutation group + as the value returned by the wrapper. + + :rtype: :class:`~google.cloud.spanner_v1.database.MutationGroupsCheckout` + :returns: new wrapper + """ + return MutationGroupsCheckout(self) + + def batch_snapshot( + self, + read_timestamp=None, + exact_staleness=None, + session_id=None, + transaction_id=None, + ): """Return an object which wraps a batch read / query. :type read_timestamp: :class:`datetime.datetime` @@ -745,11 +765,21 @@ def batch_snapshot(self, read_timestamp=None, exact_staleness=None): :param exact_staleness: Execute all reads at a timestamp that is ``exact_staleness`` old. + :type session_id: str + :param session_id: id of the session used in transaction + + :type transaction_id: str + :param transaction_id: id of the transaction + :rtype: :class:`~google.cloud.spanner_v1.database.BatchSnapshot` :returns: new wrapper """ return BatchSnapshot( - self, read_timestamp=read_timestamp, exact_staleness=exact_staleness + self, + read_timestamp=read_timestamp, + exact_staleness=exact_staleness, + session_id=session_id, + transaction_id=transaction_id, ) def run_in_transaction(self, func, *args, **kw): @@ -1041,6 +1071,39 @@ def __exit__(self, exc_type, exc_val, exc_tb): self._database._pool.put(self._session) +class MutationGroupsCheckout(object): + """Context manager for using mutation groups from a database. + + Inside the context manager, checks out a session from the database, + creates mutation groups from it, making the groups available. + + Caller must *not* use the object to perform API requests outside the scope + of the context manager. + + :type database: :class:`~google.cloud.spanner_v1.database.Database` + :param database: database to use + """ + + def __init__(self, database): + self._database = database + self._session = None + + def __enter__(self): + """Begin ``with`` block.""" + session = self._session = self._database._pool.get() + return MutationGroups(session) + + def __exit__(self, exc_type, exc_val, exc_tb): + """End ``with`` block.""" + if isinstance(exc_val, NotFound): + # If NotFound exception occurs inside the with block + # then we validate if the session still exists. + if not self._session.exists(): + self._session = self._database._pool._new_session() + self._session.create() + self._database._pool.put(self._session) + + class SnapshotCheckout(object): """Context manager for using a snapshot from a database. @@ -1094,10 +1157,19 @@ class BatchSnapshot(object): ``exact_staleness`` old. 
""" - def __init__(self, database, read_timestamp=None, exact_staleness=None): + def __init__( + self, + database, + read_timestamp=None, + exact_staleness=None, + session_id=None, + transaction_id=None, + ): self._database = database + self._session_id = session_id self._session = None self._snapshot = None + self._transaction_id = transaction_id self._read_timestamp = read_timestamp self._exact_staleness = exact_staleness @@ -1145,7 +1217,10 @@ def _get_session(self): """ if self._session is None: session = self._session = self._database.session() - session.create() + if self._session_id is None: + session.create() + else: + session._session_id = self._session_id return self._session def _get_snapshot(self): @@ -1155,10 +1230,22 @@ def _get_snapshot(self): read_timestamp=self._read_timestamp, exact_staleness=self._exact_staleness, multi_use=True, + transaction_id=self._transaction_id, ) - self._snapshot.begin() + if self._transaction_id is None: + self._snapshot.begin() return self._snapshot + def get_batch_transaction_id(self): + snapshot = self._snapshot + if snapshot is None: + raise ValueError("Read-only transaction not begun") + return BatchTransactionId( + snapshot._transaction_id, + snapshot._session.session_id, + snapshot._read_timestamp, + ) + def read(self, *args, **kw): """Convenience method: perform read operation via snapshot. @@ -1182,6 +1269,7 @@ def generate_read_batches( partition_size_bytes=None, max_partitions=None, data_boost_enabled=False, + directed_read_options=None, *, retry=gapic_v1.method.DEFAULT, timeout=gapic_v1.method.DEFAULT, @@ -1221,6 +1309,12 @@ def generate_read_batches( (Optional) If this is for a partitioned read and this field is set ``true``, the request will be executed via offline access. + :type directed_read_options: :class:`~google.cloud.spanner_v1.DirectedReadOptions` + or :class:`dict` + :param directed_read_options: (Optional) Request level option used to set the directed_read_options + for ReadRequests that indicates which replicas + or regions should be used for non-transactional reads. + :type retry: :class:`~google.api_core.retry.Retry` :param retry: (Optional) The retry settings for this request. @@ -1249,6 +1343,7 @@ def generate_read_batches( "keyset": keyset._to_dict(), "index": index, "data_boost_enabled": data_boost_enabled, + "directed_read_options": directed_read_options, } for partition in partitions: yield {"partition": partition, "read": read_info.copy()} @@ -1293,6 +1388,7 @@ def generate_query_batches( max_partitions=None, query_options=None, data_boost_enabled=False, + directed_read_options=None, *, retry=gapic_v1.method.DEFAULT, timeout=gapic_v1.method.DEFAULT, @@ -1344,6 +1440,12 @@ def generate_query_batches( (Optional) If this is for a partitioned query and this field is set ``true``, the request will be executed via offline access. + :type directed_read_options: :class:`~google.cloud.spanner_v1.DirectedReadOptions` + or :class:`dict` + :param directed_read_options: (Optional) Request level option used to set the directed_read_options + for ExecuteSqlRequests that indicates which replicas + or regions should be used for non-transactional queries. + :type retry: :class:`~google.api_core.retry.Retry` :param retry: (Optional) The retry settings for this request. 
@@ -1368,6 +1470,7 @@ def generate_query_batches( query_info = { "sql": sql, "data_boost_enabled": data_boost_enabled, + "directed_read_options": directed_read_options, } if params: query_info["params"] = params diff --git a/google/cloud/spanner_v1/gapic_metadata.json b/google/cloud/spanner_v1/gapic_metadata.json index ea51736a55..f5957c633a 100644 --- a/google/cloud/spanner_v1/gapic_metadata.json +++ b/google/cloud/spanner_v1/gapic_metadata.json @@ -15,6 +15,11 @@ "batch_create_sessions" ] }, + "BatchWrite": { + "methods": [ + "batch_write" + ] + }, "BeginTransaction": { "methods": [ "begin_transaction" @@ -95,6 +100,11 @@ "batch_create_sessions" ] }, + "BatchWrite": { + "methods": [ + "batch_write" + ] + }, "BeginTransaction": { "methods": [ "begin_transaction" @@ -175,6 +185,11 @@ "batch_create_sessions" ] }, + "BatchWrite": { + "methods": [ + "batch_write" + ] + }, "BeginTransaction": { "methods": [ "begin_transaction" diff --git a/google/cloud/spanner_v1/gapic_version.py b/google/cloud/spanner_v1/gapic_version.py index 4f879f0e40..36303c7f1a 100644 --- a/google/cloud/spanner_v1/gapic_version.py +++ b/google/cloud/spanner_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "3.40.1" # {x-release-please-version} +__version__ = "3.41.0" # {x-release-please-version} diff --git a/google/cloud/spanner_v1/services/spanner/async_client.py b/google/cloud/spanner_v1/services/spanner/async_client.py index a394467ffd..f4cd066bd9 100644 --- a/google/cloud/spanner_v1/services/spanner/async_client.py +++ b/google/cloud/spanner_v1/services/spanner/async_client.py @@ -35,14 +35,14 @@ from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 -from google.api_core import retry as retries +from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore from google.cloud.spanner_v1.services.spanner import pagers from google.cloud.spanner_v1.types import commit_response @@ -60,6 +60,7 @@ class SpannerAsyncClient: """Cloud Spanner API + The Cloud Spanner API can be used to manage sessions and execute transactions on data stored in Cloud Spanner databases. """ @@ -285,7 +286,7 @@ async def sample_create_session(): This corresponds to the ``database`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -316,7 +317,7 @@ async def sample_create_session(): # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_session, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=0.25, maximum=32.0, multiplier=1.3, @@ -357,6 +358,7 @@ async def batch_create_sessions( metadata: Sequence[Tuple[str, str]] = (), ) -> spanner.BatchCreateSessionsResponse: r"""Creates multiple new sessions. + This API can be used to initialize a session cache on the clients. See https://goo.gl/TgSFN2 for best practices on session cache management. @@ -411,7 +413,7 @@ async def sample_batch_create_sessions(): This corresponds to the ``session_count`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -420,7 +422,7 @@ async def sample_batch_create_sessions(): Returns: google.cloud.spanner_v1.types.BatchCreateSessionsResponse: The response for - [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. + [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. """ # Create or coerce a protobuf request object. @@ -446,7 +448,7 @@ async def sample_batch_create_sessions(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.batch_create_sessions, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=0.25, maximum=32.0, multiplier=1.3, @@ -526,7 +528,7 @@ async def sample_get_session(): This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -557,7 +559,7 @@ async def sample_get_session(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_session, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=0.25, maximum=32.0, multiplier=1.3, @@ -636,7 +638,7 @@ async def sample_list_sessions(): This corresponds to the ``database`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -672,7 +674,7 @@ async def sample_list_sessions(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_sessions, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=0.25, maximum=32.0, multiplier=1.3, @@ -758,7 +760,7 @@ async def sample_delete_session(): This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -785,7 +787,7 @@ async def sample_delete_session(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_session, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=0.25, maximum=32.0, multiplier=1.3, @@ -867,7 +869,7 @@ async def sample_execute_sql(): The request object. The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -886,7 +888,7 @@ async def sample_execute_sql(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.execute_sql, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=0.25, maximum=32.0, multiplier=1.3, @@ -964,7 +966,7 @@ async def sample_execute_streaming_sql(): The request object. The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1065,7 +1067,7 @@ async def sample_execute_batch_dml(): request (Optional[Union[google.cloud.spanner_v1.types.ExecuteBatchDmlRequest, dict]]): The request object. The request for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1073,8 +1075,10 @@ async def sample_execute_batch_dml(): Returns: google.cloud.spanner_v1.types.ExecuteBatchDmlResponse: - The response for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. Contains a list - of [ResultSet][google.spanner.v1.ResultSet] messages, + The response for + [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. + Contains a list of + [ResultSet][google.spanner.v1.ResultSet] messages, one for each DML statement that has successfully executed, in the same order as the statements in the request. If a statement fails, the status in the @@ -1084,34 +1088,35 @@ async def sample_execute_batch_dml(): following approach: 1. Check the status in the response message. The - [google.rpc.Code][google.rpc.Code] enum value OK - indicates that all statements were executed - successfully. - 2. If the status was not OK, check the number of - result sets in the response. If the response - contains N - [ResultSet][google.spanner.v1.ResultSet] messages, - then statement N+1 in the request failed. + [google.rpc.Code][google.rpc.Code] enum value OK + indicates that all statements were executed + successfully. 2. If the status was not OK, check the + number of result sets in the response. If the + response contains N + [ResultSet][google.spanner.v1.ResultSet] messages, + then statement N+1 in the request failed. 
Example 1: - Request: 5 DML statements, all executed successfully. - - Response: 5 - [ResultSet][google.spanner.v1.ResultSet] messages, - with the status OK. + + \* Response: 5 + [ResultSet][google.spanner.v1.ResultSet] messages, + with the status OK. Example 2: - Request: 5 DML statements. The third statement has a syntax error. - - Response: 2 - [ResultSet][google.spanner.v1.ResultSet] messages, - and a syntax error (INVALID_ARGUMENT) status. The - number of [ResultSet][google.spanner.v1.ResultSet] - messages indicates that the third statement - failed, and the fourth and fifth statements were - not executed. + + \* Response: 2 + [ResultSet][google.spanner.v1.ResultSet] messages, + and a syntax error (INVALID_ARGUMENT) status. The + number of [ResultSet][google.spanner.v1.ResultSet] + messages indicates that the third statement failed, + and the fourth and fifth statements were not + executed. """ # Create or coerce a protobuf request object. @@ -1121,7 +1126,7 @@ async def sample_execute_batch_dml(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.execute_batch_dml, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=0.25, maximum=32.0, multiplier=1.3, @@ -1208,7 +1213,7 @@ async def sample_read(): The request object. The request for [Read][google.spanner.v1.Spanner.Read] and [StreamingRead][google.spanner.v1.Spanner.StreamingRead]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1227,7 +1232,7 @@ async def sample_read(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.read, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=0.25, maximum=32.0, multiplier=1.3, @@ -1306,7 +1311,7 @@ async def sample_streaming_read(): The request object. The request for [Read][google.spanner.v1.Spanner.Read] and [StreamingRead][google.spanner.v1.Spanner.StreamingRead]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1409,7 +1414,7 @@ async def sample_begin_transaction(): This corresponds to the ``options`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1442,7 +1447,7 @@ async def sample_begin_transaction(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.begin_transaction, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=0.25, maximum=32.0, multiplier=1.3, @@ -1570,7 +1575,7 @@ async def sample_commit(): This corresponds to the ``single_use_transaction`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1611,7 +1616,7 @@ async def sample_commit(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.commit, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=0.25, maximum=32.0, multiplier=1.3, @@ -1704,7 +1709,7 @@ async def sample_rollback(): This corresponds to the ``transaction_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1733,7 +1738,7 @@ async def sample_rollback(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.rollback, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=0.25, maximum=32.0, multiplier=1.3, @@ -1814,7 +1819,7 @@ async def sample_partition_query(): request (Optional[Union[google.cloud.spanner_v1.types.PartitionQueryRequest, dict]]): The request object. The request for [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1834,7 +1839,7 @@ async def sample_partition_query(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.partition_query, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=0.25, maximum=32.0, multiplier=1.3, @@ -1921,7 +1926,7 @@ async def sample_partition_read(): request (Optional[Union[google.cloud.spanner_v1.types.PartitionReadRequest, dict]]): The request object. The request for [PartitionRead][google.spanner.v1.Spanner.PartitionRead] - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1941,7 +1946,7 @@ async def sample_partition_read(): # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.partition_read, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=0.25, maximum=32.0, multiplier=1.3, @@ -1971,6 +1976,143 @@ async def sample_partition_read(): # Done; return the response. return response + def batch_write( + self, + request: Optional[Union[spanner.BatchWriteRequest, dict]] = None, + *, + session: Optional[str] = None, + mutation_groups: Optional[ + MutableSequence[spanner.BatchWriteRequest.MutationGroup] + ] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[spanner.BatchWriteResponse]]: + r"""Batches the supplied mutation groups in a collection + of efficient transactions. 
All mutations in a group are + committed atomically. However, mutations across groups + can be committed non-atomically in an unspecified order + and thus, they must be independent of each other. + Partial failure is possible, i.e., some groups may have + been committed successfully, while some may have failed. + The results of individual batches are streamed into the + response as the batches are applied. + + BatchWrite requests are not replay protected, meaning + that each mutation group may be applied more than once. + Replays of non-idempotent mutations may have undesirable + effects. For example, replays of an insert mutation may + produce an already exists error or if you use generated + or commit timestamp-based keys, it may result in + additional rows being added to the mutation's table. We + recommend structuring your mutation groups to be + idempotent to avoid this issue. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import spanner_v1 + + async def sample_batch_write(): + # Create a client + client = spanner_v1.SpannerAsyncClient() + + # Initialize request argument(s) + mutation_groups = spanner_v1.MutationGroup() + mutation_groups.mutations.insert.table = "table_value" + + request = spanner_v1.BatchWriteRequest( + session="session_value", + mutation_groups=mutation_groups, + ) + + # Make the request + stream = await client.batch_write(request=request) + + # Handle the response + async for response in stream: + print(response) + + Args: + request (Optional[Union[google.cloud.spanner_v1.types.BatchWriteRequest, dict]]): + The request object. The request for + [BatchWrite][google.spanner.v1.Spanner.BatchWrite]. + session (:class:`str`): + Required. The session in which the + batch request is to be run. + + This corresponds to the ``session`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + mutation_groups (:class:`MutableSequence[google.cloud.spanner_v1.types.BatchWriteRequest.MutationGroup]`): + Required. The groups of mutations to + be applied. + + This corresponds to the ``mutation_groups`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[google.cloud.spanner_v1.types.BatchWriteResponse]: + The result of applying a batch of + mutations. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([session, mutation_groups]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = spanner.BatchWriteRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
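# A hedged usage sketch for the streaming batch_write call documented above.
# Because BatchWrite is not replay protected, the group below uses
# insert_or_update so a replayed apply cannot fail with "already exists" or add
# duplicate rows. Table, columns, and values are hypothetical, and the
# list-of-lists value relies on proto-plus coercion to google.protobuf.ListValue.
from google.cloud import spanner_v1

idempotent_group = spanner_v1.BatchWriteRequest.MutationGroup(
    mutations=[
        spanner_v1.Mutation(
            insert_or_update=spanner_v1.Mutation.Write(
                table="Singers",
                columns=["SingerId", "FirstName"],
                values=[["1", "Marc"]],
            )
        )
    ]
)


async def apply_batch(session_name: str):
    client = spanner_v1.SpannerAsyncClient()
    stream = await client.batch_write(
        session=session_name,
        mutation_groups=[idempotent_group],
    )
    # Partial failure is possible: each response covers only a subset of groups.
    async for response in stream:
        print(response.indexes, response.status.code)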
+ if session is not None: + request.session = session + if mutation_groups: + request.mutation_groups.extend(mutation_groups) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_write, + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + async def __aenter__(self) -> "SpannerAsyncClient": return self diff --git a/google/cloud/spanner_v1/services/spanner/client.py b/google/cloud/spanner_v1/services/spanner/client.py index f3130c56f6..28f203fff7 100644 --- a/google/cloud/spanner_v1/services/spanner/client.py +++ b/google/cloud/spanner_v1/services/spanner/client.py @@ -99,6 +99,7 @@ def get_transport_class( class SpannerClient(metaclass=SpannerClientMeta): """Cloud Spanner API + The Cloud Spanner API can be used to manage sessions and execute transactions on data stored in Cloud Spanner databases. """ @@ -604,6 +605,7 @@ def batch_create_sessions( metadata: Sequence[Tuple[str, str]] = (), ) -> spanner.BatchCreateSessionsResponse: r"""Creates multiple new sessions. + This API can be used to initialize a session cache on the clients. See https://goo.gl/TgSFN2 for best practices on session cache management. @@ -667,7 +669,7 @@ def sample_batch_create_sessions(): Returns: google.cloud.spanner_v1.types.BatchCreateSessionsResponse: The response for - [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. + [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. """ # Create or coerce a protobuf request object. @@ -1277,8 +1279,10 @@ def sample_execute_batch_dml(): Returns: google.cloud.spanner_v1.types.ExecuteBatchDmlResponse: - The response for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. Contains a list - of [ResultSet][google.spanner.v1.ResultSet] messages, + The response for + [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. + Contains a list of + [ResultSet][google.spanner.v1.ResultSet] messages, one for each DML statement that has successfully executed, in the same order as the statements in the request. If a statement fails, the status in the @@ -1288,34 +1292,35 @@ def sample_execute_batch_dml(): following approach: 1. Check the status in the response message. The - [google.rpc.Code][google.rpc.Code] enum value OK - indicates that all statements were executed - successfully. - 2. If the status was not OK, check the number of - result sets in the response. If the response - contains N - [ResultSet][google.spanner.v1.ResultSet] messages, - then statement N+1 in the request failed. + [google.rpc.Code][google.rpc.Code] enum value OK + indicates that all statements were executed + successfully. 2. If the status was not OK, check the + number of result sets in the response. If the + response contains N + [ResultSet][google.spanner.v1.ResultSet] messages, + then statement N+1 in the request failed. Example 1: - Request: 5 DML statements, all executed successfully. - - Response: 5 - [ResultSet][google.spanner.v1.ResultSet] messages, - with the status OK. + + \* Response: 5 + [ResultSet][google.spanner.v1.ResultSet] messages, + with the status OK. 
Example 2: - Request: 5 DML statements. The third statement has a syntax error. - - Response: 2 - [ResultSet][google.spanner.v1.ResultSet] messages, - and a syntax error (INVALID_ARGUMENT) status. The - number of [ResultSet][google.spanner.v1.ResultSet] - messages indicates that the third statement - failed, and the fourth and fifth statements were - not executed. + + \* Response: 2 + [ResultSet][google.spanner.v1.ResultSet] messages, + and a syntax error (INVALID_ARGUMENT) status. The + number of [ResultSet][google.spanner.v1.ResultSet] + messages indicates that the third statement failed, + and the fourth and fifth statements were not + executed. """ # Create or coerce a protobuf request object. @@ -2117,6 +2122,143 @@ def sample_partition_read(): # Done; return the response. return response + def batch_write( + self, + request: Optional[Union[spanner.BatchWriteRequest, dict]] = None, + *, + session: Optional[str] = None, + mutation_groups: Optional[ + MutableSequence[spanner.BatchWriteRequest.MutationGroup] + ] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[spanner.BatchWriteResponse]: + r"""Batches the supplied mutation groups in a collection + of efficient transactions. All mutations in a group are + committed atomically. However, mutations across groups + can be committed non-atomically in an unspecified order + and thus, they must be independent of each other. + Partial failure is possible, i.e., some groups may have + been committed successfully, while some may have failed. + The results of individual batches are streamed into the + response as the batches are applied. + + BatchWrite requests are not replay protected, meaning + that each mutation group may be applied more than once. + Replays of non-idempotent mutations may have undesirable + effects. For example, replays of an insert mutation may + produce an already exists error or if you use generated + or commit timestamp-based keys, it may result in + additional rows being added to the mutation's table. We + recommend structuring your mutation groups to be + idempotent to avoid this issue. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import spanner_v1 + + def sample_batch_write(): + # Create a client + client = spanner_v1.SpannerClient() + + # Initialize request argument(s) + mutation_groups = spanner_v1.MutationGroup() + mutation_groups.mutations.insert.table = "table_value" + + request = spanner_v1.BatchWriteRequest( + session="session_value", + mutation_groups=mutation_groups, + ) + + # Make the request + stream = client.batch_write(request=request) + + # Handle the response + for response in stream: + print(response) + + Args: + request (Union[google.cloud.spanner_v1.types.BatchWriteRequest, dict]): + The request object. The request for + [BatchWrite][google.spanner.v1.Spanner.BatchWrite]. + session (str): + Required. The session in which the + batch request is to be run. + + This corresponds to the ``session`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ mutation_groups (MutableSequence[google.cloud.spanner_v1.types.BatchWriteRequest.MutationGroup]): + Required. The groups of mutations to + be applied. + + This corresponds to the ``mutation_groups`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.spanner_v1.types.BatchWriteResponse]: + The result of applying a batch of + mutations. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([session, mutation_groups]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a spanner.BatchWriteRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner.BatchWriteRequest): + request = spanner.BatchWriteRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if session is not None: + request.session = session + if mutation_groups is not None: + request.mutation_groups = mutation_groups + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_write] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + def __enter__(self) -> "SpannerClient": return self diff --git a/google/cloud/spanner_v1/services/spanner/transports/base.py b/google/cloud/spanner_v1/services/spanner/transports/base.py index 668191c5f2..27006d8fbc 100644 --- a/google/cloud/spanner_v1/services/spanner/transports/base.py +++ b/google/cloud/spanner_v1/services/spanner/transports/base.py @@ -322,6 +322,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=30.0, client_info=client_info, ), + self.batch_write: gapic_v1.method.wrap_method( + self.batch_write, + default_timeout=3600.0, + client_info=client_info, + ), } def close(self): @@ -473,6 +478,15 @@ def partition_read( ]: raise NotImplementedError() + @property + def batch_write( + self, + ) -> Callable[ + [spanner.BatchWriteRequest], + Union[spanner.BatchWriteResponse, Awaitable[spanner.BatchWriteResponse]], + ]: + raise NotImplementedError() + @property def kind(self) -> str: raise NotImplementedError() diff --git a/google/cloud/spanner_v1/services/spanner/transports/grpc.py b/google/cloud/spanner_v1/services/spanner/transports/grpc.py index e54453671b..86d9ba4133 100644 --- a/google/cloud/spanner_v1/services/spanner/transports/grpc.py +++ b/google/cloud/spanner_v1/services/spanner/transports/grpc.py @@ -36,6 +36,7 @@ class SpannerGrpcTransport(SpannerTransport): """gRPC backend transport for Spanner. 
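# A hedged sketch of the synchronous flattened signature shown above, with a
# per-call timeout overriding the 3600 s default registered in the transport.
# `groups` is assumed to be a prebuilt list of BatchWriteRequest.MutationGroup
# messages and the session name is hypothetical.
from google.cloud import spanner_v1


def apply_batch_sync(groups):
    client = spanner_v1.SpannerClient()
    for response in client.batch_write(
        session="projects/p/instances/i/databases/d/sessions/s",  # hypothetical
        mutation_groups=groups,
        timeout=600.0,
    ):
        print(response.indexes, response.status.code)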
Cloud Spanner API + The Cloud Spanner API can be used to manage sessions and execute transactions on data stored in Cloud Spanner databases. @@ -288,6 +289,7 @@ def batch_create_sessions( r"""Return a callable for the batch create sessions method over gRPC. Creates multiple new sessions. + This API can be used to initialize a session cache on the clients. See https://goo.gl/TgSFN2 for best practices on session cache management. @@ -753,6 +755,50 @@ def partition_read( ) return self._stubs["partition_read"] + @property + def batch_write( + self, + ) -> Callable[[spanner.BatchWriteRequest], spanner.BatchWriteResponse]: + r"""Return a callable for the batch write method over gRPC. + + Batches the supplied mutation groups in a collection + of efficient transactions. All mutations in a group are + committed atomically. However, mutations across groups + can be committed non-atomically in an unspecified order + and thus, they must be independent of each other. + Partial failure is possible, i.e., some groups may have + been committed successfully, while some may have failed. + The results of individual batches are streamed into the + response as the batches are applied. + + BatchWrite requests are not replay protected, meaning + that each mutation group may be applied more than once. + Replays of non-idempotent mutations may have undesirable + effects. For example, replays of an insert mutation may + produce an already exists error or if you use generated + or commit timestamp-based keys, it may result in + additional rows being added to the mutation's table. We + recommend structuring your mutation groups to be + idempotent to avoid this issue. + + Returns: + Callable[[~.BatchWriteRequest], + ~.BatchWriteResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_write" not in self._stubs: + self._stubs["batch_write"] = self.grpc_channel.unary_stream( + "/google.spanner.v1.Spanner/BatchWrite", + request_serializer=spanner.BatchWriteRequest.serialize, + response_deserializer=spanner.BatchWriteResponse.deserialize, + ) + return self._stubs["batch_write"] + def close(self): self.grpc_channel.close() diff --git a/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py b/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py index 78548aa2f8..d0755e3a67 100644 --- a/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py +++ b/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py @@ -37,6 +37,7 @@ class SpannerGrpcAsyncIOTransport(SpannerTransport): """gRPC AsyncIO backend transport for Spanner. Cloud Spanner API + The Cloud Spanner API can be used to manage sessions and execute transactions on data stored in Cloud Spanner databases. @@ -292,6 +293,7 @@ def batch_create_sessions( r"""Return a callable for the batch create sessions method over gRPC. Creates multiple new sessions. + This API can be used to initialize a session cache on the clients. See https://goo.gl/TgSFN2 for best practices on session cache management. @@ -769,6 +771,50 @@ def partition_read( ) return self._stubs["partition_read"] + @property + def batch_write( + self, + ) -> Callable[[spanner.BatchWriteRequest], Awaitable[spanner.BatchWriteResponse]]: + r"""Return a callable for the batch write method over gRPC. 
+ + Batches the supplied mutation groups in a collection + of efficient transactions. All mutations in a group are + committed atomically. However, mutations across groups + can be committed non-atomically in an unspecified order + and thus, they must be independent of each other. + Partial failure is possible, i.e., some groups may have + been committed successfully, while some may have failed. + The results of individual batches are streamed into the + response as the batches are applied. + + BatchWrite requests are not replay protected, meaning + that each mutation group may be applied more than once. + Replays of non-idempotent mutations may have undesirable + effects. For example, replays of an insert mutation may + produce an already exists error or if you use generated + or commit timestamp-based keys, it may result in + additional rows being added to the mutation's table. We + recommend structuring your mutation groups to be + idempotent to avoid this issue. + + Returns: + Callable[[~.BatchWriteRequest], + Awaitable[~.BatchWriteResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_write" not in self._stubs: + self._stubs["batch_write"] = self.grpc_channel.unary_stream( + "/google.spanner.v1.Spanner/BatchWrite", + request_serializer=spanner.BatchWriteRequest.serialize, + response_deserializer=spanner.BatchWriteResponse.deserialize, + ) + return self._stubs["batch_write"] + def close(self): return self.grpc_channel.close() diff --git a/google/cloud/spanner_v1/services/spanner/transports/rest.py b/google/cloud/spanner_v1/services/spanner/transports/rest.py index 83abd878df..5e32bfaf2a 100644 --- a/google/cloud/spanner_v1/services/spanner/transports/rest.py +++ b/google/cloud/spanner_v1/services/spanner/transports/rest.py @@ -78,6 +78,14 @@ def post_batch_create_sessions(self, response): logging.log(f"Received response: {response}") return response + def pre_batch_write(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_batch_write(self, response): + logging.log(f"Received response: {response}") + return response + def pre_begin_transaction(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -211,6 +219,27 @@ def post_batch_create_sessions( """ return response + def pre_batch_write( + self, request: spanner.BatchWriteRequest, metadata: Sequence[Tuple[str, str]] + ) -> Tuple[spanner.BatchWriteRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for batch_write + + Override in a subclass to manipulate the request or metadata + before they are sent to the Spanner server. + """ + return request, metadata + + def post_batch_write( + self, response: rest_streaming.ResponseIterator + ) -> rest_streaming.ResponseIterator: + """Post-rpc interceptor for batch_write + + Override in a subclass to manipulate the response + after it is returned by the Spanner server but before + it is returned to user code. + """ + return response + def pre_begin_transaction( self, request: spanner.BeginTransactionRequest, @@ -493,6 +522,7 @@ class SpannerRestTransport(SpannerTransport): """REST backend transport for Spanner. 
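# A hedged sketch of wiring the new pre/post batch_write hooks into a custom
# interceptor. The SpannerRestInterceptor / SpannerRestTransport names follow
# the usual generated REST pattern and are assumptions here; constructing the
# transport needs application default credentials, and the Spanner service may
# not accept REST traffic in every environment, so treat this as the hook shape only.
from google.cloud import spanner_v1
from google.cloud.spanner_v1.services.spanner.transports.rest import (
    SpannerRestInterceptor,
    SpannerRestTransport,
)


class LoggingSpannerInterceptor(SpannerRestInterceptor):
    def pre_batch_write(self, request, metadata):
        print(f"BatchWrite request for session {request.session}")
        return request, metadata

    def post_batch_write(self, response):
        # `response` is a rest_streaming.ResponseIterator; return it unchanged.
        return response


rest_client = spanner_v1.SpannerClient(
    transport=SpannerRestTransport(interceptor=LoggingSpannerInterceptor())
)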
Cloud Spanner API + The Cloud Spanner API can be used to manage sessions and execute transactions on data stored in Cloud Spanner databases. @@ -680,6 +710,101 @@ def __call__( resp = self._interceptor.post_batch_create_sessions(resp) return resp + class _BatchWrite(SpannerRestStub): + def __hash__(self): + return hash("BatchWrite") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: spanner.BatchWriteRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> rest_streaming.ResponseIterator: + r"""Call the batch write method over HTTP. + + Args: + request (~.spanner.BatchWriteRequest): + The request object. The request for + [BatchWrite][google.spanner.v1.Spanner.BatchWrite]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.spanner.BatchWriteResponse: + The result of applying a batch of + mutations. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:batchWrite", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_batch_write(request, metadata) + pb_request = spanner.BatchWriteRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = rest_streaming.ResponseIterator(response, spanner.BatchWriteResponse) + resp = self._interceptor.post_batch_write(resp) + return resp + class _BeginTransaction(SpannerRestStub): def __hash__(self): return hash("BeginTransaction") @@ -2055,6 +2180,14 @@ def batch_create_sessions( # In C++ this would require a dynamic_cast return self._BatchCreateSessions(self._session, self._host, self._interceptor) # type: ignore + @property + def batch_write( + self, + ) -> Callable[[spanner.BatchWriteRequest], spanner.BatchWriteResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._BatchWrite(self._session, self._host, self._interceptor) # type: ignore + @property def begin_transaction( self, diff --git a/google/cloud/spanner_v1/session.py b/google/cloud/spanner_v1/session.py index 256e72511b..b25af53805 100644 --- a/google/cloud/spanner_v1/session.py +++ b/google/cloud/spanner_v1/session.py @@ -441,7 +441,6 @@ def _delay_until_retry(exc, deadline, attempts): delay = _get_retry_delay(cause, attempts) if delay is not None: - if now + delay > deadline: raise diff --git a/google/cloud/spanner_v1/snapshot.py b/google/cloud/spanner_v1/snapshot.py index 573042aa11..491ff37d4a 100644 --- a/google/cloud/spanner_v1/snapshot.py +++ b/google/cloud/spanner_v1/snapshot.py @@ -173,6 +173,7 @@ def read( partition=None, request_options=None, data_boost_enabled=False, + directed_read_options=None, *, retry=gapic_v1.method.DEFAULT, timeout=gapic_v1.method.DEFAULT, @@ -224,6 +225,12 @@ def read( ``partition_token``, the API will return an ``INVALID_ARGUMENT`` error. + :type directed_read_options: :class:`~google.cloud.spanner_v1.DirectedReadOptions` + or :class:`dict` + :param directed_read_options: (Optional) Request level option used to set the directed_read_options + for all ReadRequests and ExecuteSqlRequests that indicates which replicas + or regions should be used for non-transactional reads or queries. + :rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet` :returns: a result set instance which can be used to consume rows. @@ -253,6 +260,11 @@ def read( if self._read_only: # Transaction tags are not supported for read only transactions. request_options.transaction_tag = None + if ( + directed_read_options is None + and database._directed_read_options is not None + ): + directed_read_options = database._directed_read_options elif self.transaction_tag is not None: request_options.transaction_tag = self.transaction_tag @@ -266,6 +278,7 @@ def read( partition_token=partition, request_options=request_options, data_boost_enabled=data_boost_enabled, + directed_read_options=directed_read_options, ) restart = functools.partial( api.streaming_read, @@ -322,6 +335,7 @@ def execute_sql( retry=gapic_v1.method.DEFAULT, timeout=gapic_v1.method.DEFAULT, data_boost_enabled=False, + directed_read_options=None, ): """Perform an ``ExecuteStreamingSql`` API request. @@ -379,6 +393,12 @@ def execute_sql( ``partition_token``, the API will return an ``INVALID_ARGUMENT`` error. + :type directed_read_options: :class:`~google.cloud.spanner_v1.DirectedReadOptions` + or :class:`dict` + :param directed_read_options: (Optional) Request level option used to set the directed_read_options + for all ReadRequests and ExecuteSqlRequests that indicates which replicas + or regions should be used for non-transactional reads or queries. + :raises ValueError: for reuse of single-use snapshots, or if a transaction ID is already pending for multiple-use snapshots. @@ -419,6 +439,11 @@ def execute_sql( if self._read_only: # Transaction tags are not supported for read only transactions. 
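# A hedged sketch of the new request-level option on Snapshot.read: route this
# read to READ_ONLY replicas in us-east1. `database` is assumed to be an
# existing spanner_v1 Database handle and the table/columns are hypothetical;
# omitting directed_read_options falls back to the database-level default, as
# the hunk above shows.
from google.cloud.spanner_v1 import KeySet
from google.cloud.spanner_v1.types import DirectedReadOptions


def read_from_us_east1_read_only(database):
    directed_read = {
        "include_replicas": {
            "replica_selections": [
                {
                    "location": "us-east1",
                    "type_": DirectedReadOptions.ReplicaSelection.Type.READ_ONLY,
                }
            ],
            "auto_failover_disabled": False,
        }
    }
    with database.snapshot() as snapshot:
        rows = snapshot.read(
            table="Singers",
            columns=("SingerId", "FirstName"),
            keyset=KeySet(all_=True),
            directed_read_options=directed_read,
        )
        for row in rows:
            print(row)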
request_options.transaction_tag = None + if ( + directed_read_options is None + and database._directed_read_options is not None + ): + directed_read_options = database._directed_read_options elif self.transaction_tag is not None: request_options.transaction_tag = self.transaction_tag @@ -433,6 +458,7 @@ def execute_sql( query_options=query_options, request_options=request_options, data_boost_enabled=data_boost_enabled, + directed_read_options=directed_read_options, ) restart = functools.partial( api.execute_streaming_sql, @@ -447,31 +473,19 @@ def execute_sql( if self._transaction_id is None: # lock is added to handle the inline begin for first rpc with self._lock: - iterator = _restart_on_unavailable( - restart, - request, - "CloudSpanner.ReadWriteTransaction", - self._session, - trace_attributes, - transaction=self, - ) - self._read_request_count += 1 - self._execute_sql_count += 1 - - if self._multi_use: - return StreamedResultSet(iterator, source=self) - else: - return StreamedResultSet(iterator) + return self._get_streamed_result_set(restart, request, trace_attributes) else: - iterator = _restart_on_unavailable( - restart, - request, - "CloudSpanner.ReadWriteTransaction", - self._session, - trace_attributes, - transaction=self, - ) + return self._get_streamed_result_set(restart, request, trace_attributes) + def _get_streamed_result_set(self, restart, request, trace_attributes): + iterator = _restart_on_unavailable( + restart, + request, + "CloudSpanner.ReadWriteTransaction", + self._session, + trace_attributes, + transaction=self, + ) self._read_request_count += 1 self._execute_sql_count += 1 @@ -724,6 +738,7 @@ def __init__( max_staleness=None, exact_staleness=None, multi_use=False, + transaction_id=None, ): super(Snapshot, self).__init__(session) opts = [read_timestamp, min_read_timestamp, max_staleness, exact_staleness] @@ -739,12 +754,14 @@ def __init__( "'min_read_timestamp' / 'max_staleness'" ) + self._transaction_read_timestamp = None self._strong = len(flagged) == 0 self._read_timestamp = read_timestamp self._min_read_timestamp = min_read_timestamp self._max_staleness = max_staleness self._exact_staleness = exact_staleness self._multi_use = multi_use + self._transaction_id = transaction_id def _make_txn_selector(self): """Helper for :meth:`read`.""" @@ -768,7 +785,9 @@ def _make_txn_selector(self): value = True options = TransactionOptions( - read_only=TransactionOptions.ReadOnly(**{key: value}) + read_only=TransactionOptions.ReadOnly( + **{key: value, "return_read_timestamp": True} + ) ) if self._multi_use: @@ -814,4 +833,5 @@ def begin(self): allowed_exceptions={InternalServerError: _check_rst_stream_error}, ) self._transaction_id = response.id + self._transaction_read_timestamp = response.read_timestamp return self._transaction_id diff --git a/google/cloud/spanner_v1/streamed.py b/google/cloud/spanner_v1/streamed.py index 80a452d558..ac8fc71ce6 100644 --- a/google/cloud/spanner_v1/streamed.py +++ b/google/cloud/spanner_v1/streamed.py @@ -190,6 +190,27 @@ def one_or_none(self): except StopIteration: return answer + def to_dict_list(self): + """Return the result of a query as a list of dictionaries. + In each dictionary the key is the column name and the value is the + value of the that column in a given row. 
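# A hedged sketch of the new StreamedResultSet.to_dict_list() helper defined
# below: run a query and materialize every row as a dict keyed by column name.
# Note that it consumes the stream. `database` is assumed to be an existing
# Database handle and the query is hypothetical.
def query_as_dicts(database):
    with database.snapshot() as snapshot:
        results = snapshot.execute_sql("SELECT SingerId, FirstName FROM Singers")
        return results.to_dict_list()
        # e.g. [{"SingerId": 1, "FirstName": "Marc"}, ...]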
+ + :rtype: + :class:`list of dict` + :returns: result rows as a list of dictionaries + """ + rows = [] + for row in self: + rows.append( + { + column: value + for column, value in zip( + [column.name for column in self._metadata.row_type.fields], row + ) + } + ) + return rows + class Unmergeable(ValueError): """Unable to merge two values. diff --git a/google/cloud/spanner_v1/types/__init__.py b/google/cloud/spanner_v1/types/__init__.py index df0960d9d9..52b485d976 100644 --- a/google/cloud/spanner_v1/types/__init__.py +++ b/google/cloud/spanner_v1/types/__init__.py @@ -36,10 +36,13 @@ from .spanner import ( BatchCreateSessionsRequest, BatchCreateSessionsResponse, + BatchWriteRequest, + BatchWriteResponse, BeginTransactionRequest, CommitRequest, CreateSessionRequest, DeleteSessionRequest, + DirectedReadOptions, ExecuteBatchDmlRequest, ExecuteBatchDmlResponse, ExecuteSqlRequest, @@ -81,10 +84,13 @@ "ResultSetStats", "BatchCreateSessionsRequest", "BatchCreateSessionsResponse", + "BatchWriteRequest", + "BatchWriteResponse", "BeginTransactionRequest", "CommitRequest", "CreateSessionRequest", "DeleteSessionRequest", + "DirectedReadOptions", "ExecuteBatchDmlRequest", "ExecuteBatchDmlResponse", "ExecuteSqlRequest", diff --git a/google/cloud/spanner_v1/types/spanner.py b/google/cloud/spanner_v1/types/spanner.py index b69e61012e..3dbacbe26b 100644 --- a/google/cloud/spanner_v1/types/spanner.py +++ b/google/cloud/spanner_v1/types/spanner.py @@ -41,6 +41,7 @@ "ListSessionsResponse", "DeleteSessionRequest", "RequestOptions", + "DirectedReadOptions", "ExecuteSqlRequest", "ExecuteBatchDmlRequest", "ExecuteBatchDmlResponse", @@ -53,6 +54,8 @@ "BeginTransactionRequest", "CommitRequest", "RollbackRequest", + "BatchWriteRequest", + "BatchWriteResponse", }, ) @@ -379,6 +382,150 @@ class Priority(proto.Enum): ) +class DirectedReadOptions(proto.Message): + r"""The DirectedReadOptions can be used to indicate which replicas or + regions should be used for non-transactional reads or queries. + + DirectedReadOptions may only be specified for a read-only + transaction, otherwise the API will return an ``INVALID_ARGUMENT`` + error. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + include_replicas (google.cloud.spanner_v1.types.DirectedReadOptions.IncludeReplicas): + Include_replicas indicates the order of replicas (as they + appear in this list) to process the request. If + auto_failover_disabled is set to true and all replicas are + exhausted without finding a healthy replica, Spanner will + wait for a replica in the list to become available, requests + may fail due to ``DEADLINE_EXCEEDED`` errors. + + This field is a member of `oneof`_ ``replicas``. + exclude_replicas (google.cloud.spanner_v1.types.DirectedReadOptions.ExcludeReplicas): + Exclude_replicas indicates that should be excluded from + serving requests. Spanner will not route requests to the + replicas in this list. + + This field is a member of `oneof`_ ``replicas``. + """ + + class ReplicaSelection(proto.Message): + r"""The directed read replica selector. Callers must provide one or more + of the following fields for replica selection: + + - ``location`` - The location must be one of the regions within the + multi-region configuration of your database. 
+ - ``type`` - The type of the replica. + + Some examples of using replica_selectors are: + + - ``location:us-east1`` --> The "us-east1" replica(s) of any + available type will be used to process the request. + - ``type:READ_ONLY`` --> The "READ_ONLY" type replica(s) in nearest + . available location will be used to process the request. + - ``location:us-east1 type:READ_ONLY`` --> The "READ_ONLY" type + replica(s) in location "us-east1" will be used to process the + request. + + Attributes: + location (str): + The location or region of the serving + requests, e.g. "us-east1". + type_ (google.cloud.spanner_v1.types.DirectedReadOptions.ReplicaSelection.Type): + The type of replica. + """ + + class Type(proto.Enum): + r"""Indicates the type of replica. + + Values: + TYPE_UNSPECIFIED (0): + Not specified. + READ_WRITE (1): + Read-write replicas support both reads and + writes. + READ_ONLY (2): + Read-only replicas only support reads (not + writes). + """ + TYPE_UNSPECIFIED = 0 + READ_WRITE = 1 + READ_ONLY = 2 + + location: str = proto.Field( + proto.STRING, + number=1, + ) + type_: "DirectedReadOptions.ReplicaSelection.Type" = proto.Field( + proto.ENUM, + number=2, + enum="DirectedReadOptions.ReplicaSelection.Type", + ) + + class IncludeReplicas(proto.Message): + r"""An IncludeReplicas contains a repeated set of + ReplicaSelection which indicates the order in which replicas + should be considered. + + Attributes: + replica_selections (MutableSequence[google.cloud.spanner_v1.types.DirectedReadOptions.ReplicaSelection]): + The directed read replica selector. + auto_failover_disabled (bool): + If true, Spanner will not route requests to a replica + outside the include_replicas list when all of the specified + replicas are unavailable or unhealthy. Default value is + ``false``. + """ + + replica_selections: MutableSequence[ + "DirectedReadOptions.ReplicaSelection" + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="DirectedReadOptions.ReplicaSelection", + ) + auto_failover_disabled: bool = proto.Field( + proto.BOOL, + number=2, + ) + + class ExcludeReplicas(proto.Message): + r"""An ExcludeReplicas contains a repeated set of + ReplicaSelection that should be excluded from serving requests. + + Attributes: + replica_selections (MutableSequence[google.cloud.spanner_v1.types.DirectedReadOptions.ReplicaSelection]): + The directed read replica selector. + """ + + replica_selections: MutableSequence[ + "DirectedReadOptions.ReplicaSelection" + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="DirectedReadOptions.ReplicaSelection", + ) + + include_replicas: IncludeReplicas = proto.Field( + proto.MESSAGE, + number=1, + oneof="replicas", + message=IncludeReplicas, + ) + exclude_replicas: ExcludeReplicas = proto.Field( + proto.MESSAGE, + number=2, + oneof="replicas", + message=ExcludeReplicas, + ) + + class ExecuteSqlRequest(proto.Message): r"""The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and @@ -390,6 +537,7 @@ class ExecuteSqlRequest(proto.Message): should be performed. transaction (google.cloud.spanner_v1.types.TransactionSelector): The transaction to use. + For queries, if none is provided, the default is a temporary read-only transaction with strong concurrency. @@ -399,6 +547,7 @@ class ExecuteSqlRequest(proto.Message): single-use transactions are not supported. The caller must either supply an existing transaction ID or begin a new transaction. + Partitioned DML requires an existing Partitioned DML transaction ID. 
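# A hedged sketch of building DirectedReadOptions as proto-plus messages rather
# than dicts; include_replicas and exclude_replicas form a oneof, so setting one
# clears the other. The location value is an example only.
from google.cloud.spanner_v1.types import DirectedReadOptions

directed_options = DirectedReadOptions(
    include_replicas=DirectedReadOptions.IncludeReplicas(
        replica_selections=[
            DirectedReadOptions.ReplicaSelection(
                location="us-east1",
                type_=DirectedReadOptions.ReplicaSelection.Type.READ_ONLY,
            )
        ],
        auto_failover_disabled=True,
    )
)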
sql (str): @@ -469,6 +618,7 @@ class ExecuteSqlRequest(proto.Message): sequence number, the transaction may be aborted. Replays of previously handled requests will yield the same response as the first execution. + Required for DML statements. Ignored for queries. query_options (google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryOptions): @@ -476,14 +626,16 @@ class ExecuteSqlRequest(proto.Message): given query. request_options (google.cloud.spanner_v1.types.RequestOptions): Common options for this request. + directed_read_options (google.cloud.spanner_v1.types.DirectedReadOptions): + Directed read options for this request. data_boost_enabled (bool): If this is for a partitioned query and this field is set to - ``true``, the request will be executed via Spanner + ``true``, the request is executed with Spanner Data Boost independent compute resources. If the field is set to ``true`` but the request does not set - ``partition_token``, the API will return an - ``INVALID_ARGUMENT`` error. + ``partition_token``, the API returns an ``INVALID_ARGUMENT`` + error. """ class QueryMode(proto.Enum): @@ -623,6 +775,11 @@ class QueryOptions(proto.Message): number=11, message="RequestOptions", ) + directed_read_options: "DirectedReadOptions" = proto.Field( + proto.MESSAGE, + number=15, + message="DirectedReadOptions", + ) data_boost_enabled: bool = proto.Field( proto.BOOL, number=16, @@ -865,14 +1022,14 @@ class PartitionQueryRequest(proto.Message): sql (str): Required. The query request to generate partitions for. The request will fail if the query is not root partitionable. - The query plan of a root partitionable query has a single - distributed union operator. A distributed union operator - conceptually divides one or more tables into multiple - splits, remotely evaluates a subquery independently on each - split, and then unions all results. - - This must not contain DML commands, such as INSERT, UPDATE, - or DELETE. Use + For a query to be root partitionable, it needs to satisfy a + few conditions. For example, the first operator in the query + execution plan must be a distributed union operator. For + more information about other conditions, see `Read data in + parallel `__. + + The query request must not contain DML commands, such as + INSERT, UPDATE, or DELETE. Use [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] with a PartitionedDml transaction for large, partition-friendly DML operations. @@ -1137,14 +1294,16 @@ class ReadRequest(proto.Message): create this partition_token. request_options (google.cloud.spanner_v1.types.RequestOptions): Common options for this request. + directed_read_options (google.cloud.spanner_v1.types.DirectedReadOptions): + Directed read options for this request. data_boost_enabled (bool): If this is for a partitioned read and this field is set to - ``true``, the request will be executed via Spanner + ``true``, the request is executed with Spanner Data Boost independent compute resources. If the field is set to ``true`` but the request does not set - ``partition_token``, the API will return an - ``INVALID_ARGUMENT`` error. + ``partition_token``, the API returns an ``INVALID_ARGUMENT`` + error. 
""" session: str = proto.Field( @@ -1190,6 +1349,11 @@ class ReadRequest(proto.Message): number=11, message="RequestOptions", ) + directed_read_options: "DirectedReadOptions" = proto.Field( + proto.MESSAGE, + number=14, + message="DirectedReadOptions", + ) data_boost_enabled: bool = proto.Field( proto.BOOL, number=15, @@ -1326,4 +1490,83 @@ class RollbackRequest(proto.Message): ) +class BatchWriteRequest(proto.Message): + r"""The request for [BatchWrite][google.spanner.v1.Spanner.BatchWrite]. + + Attributes: + session (str): + Required. The session in which the batch + request is to be run. + request_options (google.cloud.spanner_v1.types.RequestOptions): + Common options for this request. + mutation_groups (MutableSequence[google.cloud.spanner_v1.types.BatchWriteRequest.MutationGroup]): + Required. The groups of mutations to be + applied. + """ + + class MutationGroup(proto.Message): + r"""A group of mutations to be committed together. Related + mutations should be placed in a group. For example, two + mutations inserting rows with the same primary key prefix in + both parent and child tables are related. + + Attributes: + mutations (MutableSequence[google.cloud.spanner_v1.types.Mutation]): + Required. The mutations in this group. + """ + + mutations: MutableSequence[mutation.Mutation] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=mutation.Mutation, + ) + + session: str = proto.Field( + proto.STRING, + number=1, + ) + request_options: "RequestOptions" = proto.Field( + proto.MESSAGE, + number=3, + message="RequestOptions", + ) + mutation_groups: MutableSequence[MutationGroup] = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=MutationGroup, + ) + + +class BatchWriteResponse(proto.Message): + r"""The result of applying a batch of mutations. + + Attributes: + indexes (MutableSequence[int]): + The mutation groups applied in this batch. The values index + into the ``mutation_groups`` field in the corresponding + ``BatchWriteRequest``. + status (google.rpc.status_pb2.Status): + An ``OK`` status indicates success. Any other status + indicates a failure. + commit_timestamp (google.protobuf.timestamp_pb2.Timestamp): + The commit timestamp of the transaction that applied this + batch. Present if ``status`` is ``OK``, absent otherwise. + """ + + indexes: MutableSequence[int] = proto.RepeatedField( + proto.INT32, + number=1, + ) + status: status_pb2.Status = proto.Field( + proto.MESSAGE, + number=2, + message=status_pb2.Status, + ) + commit_timestamp: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/spanner_v1/types/transaction.py b/google/cloud/spanner_v1/types/transaction.py index d07b2f73c4..57761569d1 100644 --- a/google/cloud/spanner_v1/types/transaction.py +++ b/google/cloud/spanner_v1/types/transaction.py @@ -417,13 +417,16 @@ class ReadLockMode(proto.Enum): Values: READ_LOCK_MODE_UNSPECIFIED (0): Default value. + If the value is not specified, the pessimistic read lock is used. PESSIMISTIC (1): Pessimistic lock mode. + Read locks are acquired immediately on read. OPTIMISTIC (2): Optimistic lock mode. + Locks for reads within the transaction are not acquired on read. 
Instead the locks are acquired on a commit to validate that read/queried data diff --git a/google/cloud/spanner_v1/types/type.py b/google/cloud/spanner_v1/types/type.py index f3fa94b4a8..f25c465dd4 100644 --- a/google/cloud/spanner_v1/types/type.py +++ b/google/cloud/spanner_v1/types/type.py @@ -137,10 +137,17 @@ class TypeAnnotationCode(proto.Enum): PostgreSQL JSONB values. Currently this annotation is always needed for [JSON][google.spanner.v1.TypeCode.JSON] when a client interacts with PostgreSQL-enabled Spanner databases. + PG_OID (4): + PostgreSQL compatible OID type. This + annotation can be used by a client interacting + with PostgreSQL-enabled Spanner database to + specify that a value should be treated using the + semantics of the OID type. """ TYPE_ANNOTATION_CODE_UNSPECIFIED = 0 PG_NUMERIC = 2 PG_JSONB = 3 + PG_OID = 4 class Type(proto.Message): diff --git a/noxfile.py b/noxfile.py index 95fe0d2365..68b2c7f8cd 100644 --- a/noxfile.py +++ b/noxfile.py @@ -17,22 +17,24 @@ # Generated by synthtool. DO NOT EDIT! from __future__ import absolute_import + import os import pathlib import re import shutil +from typing import Dict, List import warnings import nox FLAKE8_VERSION = "flake8==6.1.0" -BLACK_VERSION = "black==22.3.0" -ISORT_VERSION = "isort==5.10.1" +BLACK_VERSION = "black[jupyter]==23.7.0" +ISORT_VERSION = "isort==5.11.0" LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] DEFAULT_PYTHON_VERSION = "3.8" -UNIT_TEST_PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +UNIT_TEST_PYTHON_VERSIONS: List[str] = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] UNIT_TEST_STANDARD_DEPENDENCIES = [ "mock", "asyncmock", @@ -40,25 +42,25 @@ "pytest-cov", "pytest-asyncio", ] -UNIT_TEST_EXTERNAL_DEPENDENCIES = [] -UNIT_TEST_LOCAL_DEPENDENCIES = [] -UNIT_TEST_DEPENDENCIES = [] -UNIT_TEST_EXTRAS = [] -UNIT_TEST_EXTRAS_BY_PYTHON = {} - -SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] -SYSTEM_TEST_STANDARD_DEPENDENCIES = [ +UNIT_TEST_EXTERNAL_DEPENDENCIES: List[str] = [] +UNIT_TEST_LOCAL_DEPENDENCIES: List[str] = [] +UNIT_TEST_DEPENDENCIES: List[str] = [] +UNIT_TEST_EXTRAS: List[str] = [] +UNIT_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {} + +SYSTEM_TEST_PYTHON_VERSIONS: List[str] = ["3.8"] +SYSTEM_TEST_STANDARD_DEPENDENCIES: List[str] = [ "mock", "pytest", "google-cloud-testutils", ] -SYSTEM_TEST_EXTERNAL_DEPENDENCIES = [] -SYSTEM_TEST_LOCAL_DEPENDENCIES = [] -SYSTEM_TEST_DEPENDENCIES = [] -SYSTEM_TEST_EXTRAS = [ +SYSTEM_TEST_EXTERNAL_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_LOCAL_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_EXTRAS: List[str] = [ "tracing", ] -SYSTEM_TEST_EXTRAS_BY_PYTHON = {} +SYSTEM_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {} CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() @@ -71,6 +73,7 @@ "lint_setup_py", "blacken", "docs", + "format", ] # Error if a python version is missing @@ -170,9 +173,9 @@ def default(session): session.run( "py.test", "--quiet", - "--cov=google.cloud.spanner", - "--cov=google.cloud", - "--cov=tests.unit", + f"--junitxml=unit_{session.python}_sponge_log.xml", + "--cov=google", + "--cov=tests/unit", "--cov-append", "--cov-config=.coveragerc", "--cov-report=", @@ -210,7 +213,6 @@ def unit(session): def install_systemtest_dependencies(session, *constraints): - # Use pre-release gRPC for system tests. # Exclude version 1.52.0rc1 which has a known issue. 
# See https://github.com/grpc/grpc/issues/32163 @@ -342,7 +344,7 @@ def docs(session): ) -@nox.session(python="3.9") +@nox.session(python="3.10") def docfx(session): """Build the docfx yaml files for this library.""" diff --git a/owlbot.py b/owlbot.py index 90edb8cf86..7c249527b2 100644 --- a/owlbot.py +++ b/owlbot.py @@ -222,16 +222,6 @@ def place_before(path, text, *before_text, escape=None): escape="()", ) -s.replace( - "noxfile.py", - """f"--junitxml=unit_{session.python}_sponge_log.xml", - "--cov=google", - "--cov=tests/unit",""", - """\"--cov=google.cloud.spanner", - "--cov=google.cloud", - "--cov=tests.unit",""", -) - s.replace( "noxfile.py", r"""session.install\("-e", "."\)""", diff --git a/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json b/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json index 0ede9fccff..c6ea090f6d 100644 --- a/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json +++ b/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-spanner-admin-database", - "version": "3.40.1" + "version": "3.41.0" }, "snippets": [ { diff --git a/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json b/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json index 76f704e8fb..340d53926c 100644 --- a/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json +++ b/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-spanner-admin-instance", - "version": "3.40.1" + "version": "3.41.0" }, "snippets": [ { diff --git a/samples/generated_samples/snippet_metadata_google.spanner.v1.json b/samples/generated_samples/snippet_metadata_google.spanner.v1.json index a645b19356..cb86201769 100644 --- a/samples/generated_samples/snippet_metadata_google.spanner.v1.json +++ b/samples/generated_samples/snippet_metadata_google.spanner.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-spanner", - "version": "3.40.1" + "version": "3.41.0" }, "snippets": [ { @@ -180,6 +180,175 @@ ], "title": "spanner_v1_generated_spanner_batch_create_sessions_sync.py" }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.spanner_v1.SpannerAsyncClient", + "shortName": "SpannerAsyncClient" + }, + "fullName": "google.cloud.spanner_v1.SpannerAsyncClient.batch_write", + "method": { + "fullName": "google.spanner.v1.Spanner.BatchWrite", + "service": { + "fullName": "google.spanner.v1.Spanner", + "shortName": "Spanner" + }, + "shortName": "BatchWrite" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.spanner_v1.types.BatchWriteRequest" + }, + { + "name": "session", + "type": "str" + }, + { + "name": "mutation_groups", + "type": "MutableSequence[google.cloud.spanner_v1.types.BatchWriteRequest.MutationGroup]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.spanner_v1.types.BatchWriteResponse]", + "shortName": "batch_write" + }, + "description": "Sample for BatchWrite", + "file": "spanner_v1_generated_spanner_batch_write_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"spanner_v1_generated_Spanner_BatchWrite_async", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "spanner_v1_generated_spanner_batch_write_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.spanner_v1.SpannerClient", + "shortName": "SpannerClient" + }, + "fullName": "google.cloud.spanner_v1.SpannerClient.batch_write", + "method": { + "fullName": "google.spanner.v1.Spanner.BatchWrite", + "service": { + "fullName": "google.spanner.v1.Spanner", + "shortName": "Spanner" + }, + "shortName": "BatchWrite" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.spanner_v1.types.BatchWriteRequest" + }, + { + "name": "session", + "type": "str" + }, + { + "name": "mutation_groups", + "type": "MutableSequence[google.cloud.spanner_v1.types.BatchWriteRequest.MutationGroup]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.spanner_v1.types.BatchWriteResponse]", + "shortName": "batch_write" + }, + "description": "Sample for BatchWrite", + "file": "spanner_v1_generated_spanner_batch_write_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "spanner_v1_generated_Spanner_BatchWrite_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "spanner_v1_generated_spanner_batch_write_sync.py" + }, { "canonical": true, "clientMethod": { diff --git a/samples/generated_samples/spanner_v1_generated_spanner_batch_write_async.py b/samples/generated_samples/spanner_v1_generated_spanner_batch_write_async.py new file mode 100644 index 0000000000..39352562b1 --- /dev/null +++ b/samples/generated_samples/spanner_v1_generated_spanner_batch_write_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchWrite +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-spanner + + +# [START spanner_v1_generated_Spanner_BatchWrite_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import spanner_v1 + + +async def sample_batch_write(): + # Create a client + client = spanner_v1.SpannerAsyncClient() + + # Initialize request argument(s) + mutation_groups = spanner_v1.MutationGroup() + mutation_groups.mutations.insert.table = "table_value" + + request = spanner_v1.BatchWriteRequest( + session="session_value", + mutation_groups=mutation_groups, + ) + + # Make the request + stream = await client.batch_write(request=request) + + # Handle the response + async for response in stream: + print(response) + +# [END spanner_v1_generated_Spanner_BatchWrite_async] diff --git a/samples/generated_samples/spanner_v1_generated_spanner_batch_write_sync.py b/samples/generated_samples/spanner_v1_generated_spanner_batch_write_sync.py new file mode 100644 index 0000000000..4ee88b0cd6 --- /dev/null +++ b/samples/generated_samples/spanner_v1_generated_spanner_batch_write_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchWrite +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-spanner + + +# [START spanner_v1_generated_Spanner_BatchWrite_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
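
The generated samples above and below simply print each streamed response. As a hedged aside (a sketch, not part of the generated code), this is how the `BatchWriteResponse` fields defined earlier are usually inspected; the helper name is made up, and `OK` comes from `google.rpc.code_pb2`, as in the handwritten snippet later in this change.

```python
# Sketch only: walk the BatchWrite response stream and report per-group results.
from google.cloud import spanner_v1
from google.rpc.code_pb2 import OK


def report_batch_write(
    client: spanner_v1.SpannerClient, request: spanner_v1.BatchWriteRequest
):
    for response in client.batch_write(request=request):
        if response.status.code == OK:
            # `indexes` point back into request.mutation_groups; the commit
            # timestamp is only populated when the group was applied.
            print("applied groups", list(response.indexes), "at", response.commit_timestamp)
        else:
            print("groups", list(response.indexes), "failed with", response.status)
```
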
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import spanner_v1 + + +def sample_batch_write(): + # Create a client + client = spanner_v1.SpannerClient() + + # Initialize request argument(s) + mutation_groups = spanner_v1.MutationGroup() + mutation_groups.mutations.insert.table = "table_value" + + request = spanner_v1.BatchWriteRequest( + session="session_value", + mutation_groups=mutation_groups, + ) + + # Make the request + stream = client.batch_write(request=request) + + # Handle the response + for response in stream: + print(response) + +# [END spanner_v1_generated_Spanner_BatchWrite_sync] diff --git a/samples/samples/noxfile.py b/samples/samples/noxfile.py index 7c8a63994c..483b559017 100644 --- a/samples/samples/noxfile.py +++ b/samples/samples/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/samples/requirements-test.txt b/samples/samples/requirements-test.txt index ef7c9216af..bf07e9eaad 100644 --- a/samples/samples/requirements-test.txt +++ b/samples/samples/requirements-test.txt @@ -1,4 +1,4 @@ -pytest==7.3.1 +pytest==7.4.3 pytest-dependency==0.5.1 -mock==5.0.2 -google-cloud-testutils==1.3.3 +mock==5.1.0 +google-cloud-testutils==1.4.0 diff --git a/samples/samples/requirements.txt b/samples/samples/requirements.txt index 4ca3a436c6..7747037537 100644 --- a/samples/samples/requirements.txt +++ b/samples/samples/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-spanner==3.35.1 +google-cloud-spanner==3.40.1 futures==3.4.0; python_version < "3" diff --git a/samples/samples/snippets.py b/samples/samples/snippets.py index 82fb95a0dd..3ffd579f4a 100644 --- a/samples/samples/snippets.py +++ b/samples/samples/snippets.py @@ -31,6 +31,7 @@ from google.cloud import spanner from google.cloud.spanner_admin_instance_v1.types import spanner_instance_admin from google.cloud.spanner_v1 import param_types +from google.cloud.spanner_v1 import DirectedReadOptions from google.type import expr_pb2 from google.iam.v1 import policy_pb2 from google.cloud.spanner_v1.data_types import JsonObject @@ -403,6 +404,65 @@ def insert_data(instance_id, database_id): # [END spanner_insert_data] +# [START spanner_batch_write_at_least_once] +def batch_write(instance_id, database_id): + """Inserts sample data into the given database via BatchWrite API. + + The database and table must already exist and can be created using + `create_database`. 
+ """ + from google.rpc.code_pb2 import OK + + spanner_client = spanner.Client() + instance = spanner_client.instance(instance_id) + database = instance.database(database_id) + + with database.mutation_groups() as groups: + group1 = groups.group() + group1.insert_or_update( + table="Singers", + columns=("SingerId", "FirstName", "LastName"), + values=[ + (16, "Scarlet", "Terry"), + ], + ) + + group2 = groups.group() + group2.insert_or_update( + table="Singers", + columns=("SingerId", "FirstName", "LastName"), + values=[ + (17, "Marc", ""), + (18, "Catalina", "Smith"), + ], + ) + group2.insert_or_update( + table="Albums", + columns=("SingerId", "AlbumId", "AlbumTitle"), + values=[ + (17, 1, "Total Junk"), + (18, 2, "Go, Go, Go"), + ], + ) + + for response in groups.batch_write(): + if response.status.code == OK: + print( + "Mutation group indexes {} have been applied with commit timestamp {}".format( + response.indexes, response.commit_timestamp + ) + ) + else: + print( + "Mutation group indexes {} could not be applied with error {}".format( + response.indexes, response.status + ) + ) + + +# [END spanner_batch_write_at_least_once] + + # [START spanner_delete_data] def delete_data(instance_id, database_id): """Deletes sample data from the given database. @@ -2664,6 +2724,78 @@ def drop_sequence(instance_id, database_id): # [END spanner_drop_sequence] + +def directed_read_options( + instance_id, + database_id, +): + """ + Shows how to run an execute sql request with directed read options. + Only one of exclude_replicas or include_replicas can be set + Each accepts a list of replicaSelections which contains location and type + * `location` - The location must be one of the regions within the + multi-region configuration of your database. + * `type_` - The type of the replica + Some examples of using replica_selectors are: + * `location:us-east1` --> The "us-east1" replica(s) of any available type + will be used to process the request. + * `type:READ_ONLY` --> The "READ_ONLY" type replica(s) in nearest + available location will be used to process the + request. + * `location:us-east1 type:READ_ONLY` --> The "READ_ONLY" type replica(s) + in location "us-east1" will be used to process + the request. + include_replicas also contains an option for auto_failover_disabled which when set + Spanner will not route requests to a replica outside the + include_replicas list when all the specified replicas are unavailable + or unhealthy. The default value is `false` + """ + # [START spanner_directed_read] + # instance_id = "your-spanner-instance" + # database_id = "your-spanner-db-id" + + directed_read_options_for_client = { + "exclude_replicas": { + "replica_selections": [ + { + "location": "us-east4", + }, + ], + }, + } + + # directed_read_options can be set at client level and will be used in all + # read-only transaction requests + spanner_client = spanner.Client( + directed_read_options=directed_read_options_for_client + ) + instance = spanner_client.instance(instance_id) + database = instance.database(database_id) + + directed_read_options_for_request = { + "include_replicas": { + "replica_selections": [ + { + "type_": DirectedReadOptions.ReplicaSelection.Type.READ_ONLY, + }, + ], + "auto_failover_disabled": True, + }, + } + + with database.snapshot() as snapshot: + # Read rows while passing directed_read_options directly to the query. + # These will override the options passed at Client level. 
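
As a brief aside before the `execute_sql` call that continues just below: the same request-level options can also be passed to `snapshot.read`, which this change wires up as well (see the system test later in the diff). A hedged, self-contained sketch with placeholder instance, database, table, and column names:

```python
# Sketch only: request-level directed read options on a keyed read.
from google.cloud import spanner
from google.cloud.spanner_v1 import DirectedReadOptions

client = spanner.Client()
database = client.instance("your-spanner-instance").database("your-spanner-db-id")

directed_read_options = {
    "include_replicas": {
        "replica_selections": [
            {"type_": DirectedReadOptions.ReplicaSelection.Type.READ_ONLY},
        ],
    },
}

with database.snapshot() as snapshot:
    rows = snapshot.read(
        "Albums",
        ("SingerId", "AlbumId", "AlbumTitle"),
        spanner.KeySet(all_=True),
        directed_read_options=directed_read_options,
    )
    for row in rows:
        print(row)
```

As the comment above notes, options supplied on the request override any set at the client level.
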
+ results = snapshot.execute_sql( + "SELECT SingerId, AlbumId, AlbumTitle FROM Albums", + directed_read_options=directed_read_options_for_request, + ) + + for row in results: + print("SingerId: {}, AlbumId: {}, AlbumTitle: {}".format(*row)) + # [END spanner_directed_read] + + if __name__ == "__main__": # noqa: C901 parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter @@ -2677,6 +2809,7 @@ def drop_sequence(instance_id, database_id): subparsers.add_parser("create_instance", help=create_instance.__doc__) subparsers.add_parser("create_database", help=create_database.__doc__) subparsers.add_parser("insert_data", help=insert_data.__doc__) + subparsers.add_parser("batch_write", help=batch_write.__doc__) subparsers.add_parser("delete_data", help=delete_data.__doc__) subparsers.add_parser("query_data", help=query_data.__doc__) subparsers.add_parser("read_data", help=read_data.__doc__) @@ -2802,6 +2935,7 @@ def drop_sequence(instance_id, database_id): "--database_role", default="new_parent" ) enable_fine_grained_access_parser.add_argument("--title", default="condition title") + subparsers.add_parser("directed_read_options", help=directed_read_options.__doc__) args = parser.parse_args() @@ -2811,6 +2945,8 @@ def drop_sequence(instance_id, database_id): create_database(args.instance_id, args.database_id) elif args.command == "insert_data": insert_data(args.instance_id, args.database_id) + elif args.command == "batch_write": + batch_write(args.instance_id, args.database_id) elif args.command == "delete_data": delete_data(args.instance_id, args.database_id) elif args.command == "query_data": @@ -2931,3 +3067,5 @@ def drop_sequence(instance_id, database_id): args.database_role, args.title, ) + elif args.command == "directed_read_options": + directed_read_options(args.instance_id, args.database_id) diff --git a/samples/samples/snippets_test.py b/samples/samples/snippets_test.py index 22b5b6f944..a49a4ee480 100644 --- a/samples/samples/snippets_test.py +++ b/samples/samples/snippets_test.py @@ -290,6 +290,13 @@ def test_insert_data(capsys, instance_id, sample_database): assert "Inserted data" in out +@pytest.mark.dependency(name="batch_write") +def test_batch_write(capsys, instance_id, sample_database): + snippets.batch_write(instance_id, sample_database.database_id) + out, _ = capsys.readouterr() + assert "could not be applied with error" not in out + + @pytest.mark.dependency(depends=["insert_data"]) def test_delete_data(capsys, instance_id, sample_database): snippets.delete_data(instance_id, sample_database.database_id) @@ -845,3 +852,10 @@ def test_drop_sequence(capsys, instance_id, bit_reverse_sequence_database): "Altered Customers table to drop DEFAULT from CustomerId column and dropped the Seq sequence on database" in out ) + + +@pytest.mark.dependency(depends=["insert_data"]) +def test_directed_read_options(capsys, instance_id, sample_database): + snippets.directed_read_options(instance_id, sample_database.database_id) + out, _ = capsys.readouterr() + assert "SingerId: 1, AlbumId: 1, AlbumTitle: Total Junk" in out diff --git a/scripts/fixup_spanner_v1_keywords.py b/scripts/fixup_spanner_v1_keywords.py index df4d3501f2..f79f70b2dd 100644 --- a/scripts/fixup_spanner_v1_keywords.py +++ b/scripts/fixup_spanner_v1_keywords.py @@ -40,20 +40,21 @@ class spannerCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { 'batch_create_sessions': ('database', 
'session_count', 'session_template', ), + 'batch_write': ('session', 'mutation_groups', 'request_options', ), 'begin_transaction': ('session', 'options', 'request_options', ), 'commit': ('session', 'transaction_id', 'single_use_transaction', 'mutations', 'return_commit_stats', 'request_options', ), 'create_session': ('database', 'session', ), 'delete_session': ('name', ), 'execute_batch_dml': ('session', 'transaction', 'statements', 'seqno', 'request_options', ), - 'execute_sql': ('session', 'sql', 'transaction', 'params', 'param_types', 'resume_token', 'query_mode', 'partition_token', 'seqno', 'query_options', 'request_options', 'data_boost_enabled', ), - 'execute_streaming_sql': ('session', 'sql', 'transaction', 'params', 'param_types', 'resume_token', 'query_mode', 'partition_token', 'seqno', 'query_options', 'request_options', 'data_boost_enabled', ), + 'execute_sql': ('session', 'sql', 'transaction', 'params', 'param_types', 'resume_token', 'query_mode', 'partition_token', 'seqno', 'query_options', 'request_options', 'directed_read_options', 'data_boost_enabled', ), + 'execute_streaming_sql': ('session', 'sql', 'transaction', 'params', 'param_types', 'resume_token', 'query_mode', 'partition_token', 'seqno', 'query_options', 'request_options', 'directed_read_options', 'data_boost_enabled', ), 'get_session': ('name', ), 'list_sessions': ('database', 'page_size', 'page_token', 'filter', ), 'partition_query': ('session', 'sql', 'transaction', 'params', 'param_types', 'partition_options', ), 'partition_read': ('session', 'table', 'key_set', 'transaction', 'index', 'columns', 'partition_options', ), - 'read': ('session', 'table', 'columns', 'key_set', 'transaction', 'index', 'limit', 'resume_token', 'partition_token', 'request_options', 'data_boost_enabled', ), + 'read': ('session', 'table', 'columns', 'key_set', 'transaction', 'index', 'limit', 'resume_token', 'partition_token', 'request_options', 'directed_read_options', 'data_boost_enabled', ), 'rollback': ('session', 'transaction_id', ), - 'streaming_read': ('session', 'table', 'columns', 'key_set', 'transaction', 'index', 'limit', 'resume_token', 'partition_token', 'request_options', 'data_boost_enabled', ), + 'streaming_read': ('session', 'table', 'columns', 'key_set', 'transaction', 'index', 'limit', 'resume_token', 'partition_token', 'request_options', 'directed_read_options', 'data_boost_enabled', ), } def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: diff --git a/setup.py b/setup.py index 7f72131638..ec4d94c05e 100644 --- a/setup.py +++ b/setup.py @@ -37,11 +37,13 @@ dependencies = [ "google-api-core[grpc] >= 1.34.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*", - "google-cloud-core >= 1.4.1, < 3.0dev", + "google-cloud-core >= 1.4.4, < 3.0dev", "grpc-google-iam-v1 >= 0.12.4, <1.0.0dev", "proto-plus >= 1.22.0, <2.0.0dev", "sqlparse >= 0.4.4", + "proto-plus >= 1.22.2, <2.0.0dev; python_version>='3.11'", "protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", + "deprecated >= 1.2.14", ] extras = { "tracing": [ @@ -62,14 +64,10 @@ packages = [ package - for package in setuptools.PEP420PackageFinder.find() + for package in setuptools.find_namespace_packages() if package.startswith("google") ] -namespaces = ["google"] -if "google.cloud" in packages: - namespaces.append("google.cloud") - setuptools.setup( name=name, version=version, @@ -89,12 +87,13 @@ "Programming Language :: Python :: 3.8", "Programming 
Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "Operating System :: OS Independent", "Topic :: Internet", ], platforms="Posix; MacOS X; Windows", packages=packages, - namespace_packages=namespaces, install_requires=dependencies, extras_require=extras, python_requires=">=3.7", diff --git a/testing/constraints-3.7.txt b/testing/constraints-3.7.txt index cddc7be6e5..165814fd90 100644 --- a/testing/constraints-3.7.txt +++ b/testing/constraints-3.7.txt @@ -5,7 +5,7 @@ # e.g., if setup.py has "google-cloud-foo >= 1.14.0, < 2.0.0dev", # Then this file should have google-cloud-foo==1.14.0 google-api-core==1.34.0 -google-cloud-core==1.4.1 +google-cloud-core==1.4.4 grpc-google-iam-v1==0.12.4 libcst==0.2.5 proto-plus==1.22.0 diff --git a/tests/system/_sample_data.py b/tests/system/_sample_data.py index a7f3b80a86..9c83f42224 100644 --- a/tests/system/_sample_data.py +++ b/tests/system/_sample_data.py @@ -27,6 +27,14 @@ (2, "Bharney", "Rhubble", "bharney@example.com"), (3, "Wylma", "Phlyntstone", "wylma@example.com"), ) +BATCH_WRITE_ROW_DATA = ( + (1, "Phred", "Phlyntstone", "phred@example.com"), + (2, "Bharney", "Rhubble", "bharney@example.com"), + (3, "Wylma", "Phlyntstone", "wylma@example.com"), + (4, "Pebbles", "Phlyntstone", "pebbles@example.com"), + (5, "Betty", "Rhubble", "betty@example.com"), + (6, "Slate", "Stephenson", "slate@example.com"), +) ALL = spanner_v1.KeySet(all_=True) SQL = "SELECT * FROM contacts ORDER BY contact_id" @@ -70,7 +78,6 @@ def _check_row_data(row_data, expected, recurse_into_lists=True): def _check_cell_data(found_cell, expected_cell, recurse_into_lists=True): - if isinstance(found_cell, datetime_helpers.DatetimeWithNanoseconds): _assert_timestamp(expected_cell, found_cell) diff --git a/tests/system/conftest.py b/tests/system/conftest.py index fdeab14c8f..b297d1f2ad 100644 --- a/tests/system/conftest.py +++ b/tests/system/conftest.py @@ -119,7 +119,6 @@ def instance_configs(spanner_client): configs = list(_helpers.retry_503(spanner_client.list_instance_configs)()) if not _helpers.USE_EMULATOR: - # Defend against back-end returning configs for regions we aren't # actually allowed to use. configs = [config for config in configs if "-us-" in config.name] diff --git a/tests/system/test_database_api.py b/tests/system/test_database_api.py index 153567810a..052e628188 100644 --- a/tests/system/test_database_api.py +++ b/tests/system/test_database_api.py @@ -22,6 +22,7 @@ from google.cloud import spanner_v1 from google.cloud.spanner_v1.pool import FixedSizePool, PingingPool from google.cloud.spanner_admin_database_v1 import DatabaseDialect +from google.cloud.spanner_v1 import DirectedReadOptions from google.type import expr_pb2 from . import _helpers from . import _sample_data @@ -31,6 +32,17 @@ FKADC_CUSTOMERS_COLUMNS = ("CustomerId", "CustomerName") FKADC_SHOPPING_CARTS_COLUMNS = ("CartId", "CustomerId", "CustomerName") ALL_KEYSET = spanner_v1.KeySet(all_=True) +DIRECTED_READ_OPTIONS = { + "include_replicas": { + "replica_selections": [ + { + "location": "us-west1", + "type_": DirectedReadOptions.ReplicaSelection.Type.READ_ONLY, + }, + ], + "auto_failover_disabled": True, + }, +} @pytest.fixture(scope="module") @@ -740,3 +752,70 @@ def test_update_database_invalid(not_emulator, shared_database): # Empty `fields` is not supported. 
with pytest.raises(exceptions.InvalidArgument): shared_database.update([]) + + +def test_snapshot_read_w_directed_read_options( + shared_database, not_postgres, not_emulator +): + _helpers.retry_has_all_dll(shared_database.reload)() + table = "users_history" + columns = ["id", "commit_ts", "name", "email", "deleted"] + user_id = 1234 + name = "phred" + email = "phred@example.com" + row_data = [[user_id, spanner_v1.COMMIT_TIMESTAMP, name, email, False]] + sd = _sample_data + + with shared_database.batch() as batch: + batch.delete(table, sd.ALL) + batch.insert(table, columns, row_data) + + with shared_database.snapshot() as snapshot: + rows = list( + snapshot.read( + table, columns, sd.ALL, directed_read_options=DIRECTED_READ_OPTIONS + ) + ) + + assert len(rows) == 1 + + +def test_execute_sql_w_directed_read_options( + shared_database, not_postgres, not_emulator +): + _helpers.retry_has_all_dll(shared_database.reload)() + sd = _sample_data + + with shared_database.batch() as batch: + batch.delete(sd.TABLE, sd.ALL) + + def _unit_of_work(transaction, test): + transaction.insert_or_update(test.TABLE, test.COLUMNS, test.ROW_DATA) + + shared_database.run_in_transaction(_unit_of_work, test=sd) + + with shared_database.snapshot() as snapshot: + rows = list( + snapshot.execute_sql(sd.SQL, directed_read_options=DIRECTED_READ_OPTIONS) + ) + sd._check_rows_data(rows) + + +def test_readwrite_transaction_w_directed_read_options_w_error( + shared_database, not_emulator, not_postgres +): + _helpers.retry_has_all_dll(shared_database.reload)() + sd = _sample_data + + def _transaction_read(transaction): + list( + transaction.read( + sd.TABLE, + sd.COLUMNS, + sd.ALL, + directed_read_options=DIRECTED_READ_OPTIONS, + ) + ) + + with pytest.raises(exceptions.InvalidArgument): + shared_database.run_in_transaction(_transaction_read) diff --git a/tests/system/test_dbapi.py b/tests/system/test_dbapi.py index cb5a11e89d..aa3fd610e1 100644 --- a/tests/system/test_dbapi.py +++ b/tests/system/test_dbapi.py @@ -20,14 +20,14 @@ from google.cloud import spanner_v1 from google.cloud._helpers import UTC -from google.cloud.spanner_dbapi.connection import connect -from google.cloud.spanner_dbapi.connection import Connection -from google.cloud.spanner_dbapi.exceptions import ProgrammingError + +from google.cloud.spanner_dbapi.connection import Connection, connect +from google.cloud.spanner_dbapi.exceptions import ProgrammingError, OperationalError from google.cloud.spanner_v1 import JsonObject from google.cloud.spanner_v1 import gapic_version as package_version +from google.api_core.datetime_helpers import DatetimeWithNanoseconds from . 
import _helpers - DATABASE_NAME = "dbapi-txn" DDL_STATEMENTS = ( @@ -43,10 +43,10 @@ @pytest.fixture(scope="session") def raw_database(shared_instance, database_operation_timeout, not_postgres): - databse_id = _helpers.unique_id("dbapi-txn") + database_id = _helpers.unique_id("dbapi-txn") pool = spanner_v1.BurstyPool(labels={"testcase": "database_api"}) database = shared_instance.database( - databse_id, + database_id, ddl_statements=DDL_STATEMENTS, pool=pool, ) @@ -58,607 +58,1217 @@ def raw_database(shared_instance, database_operation_timeout, not_postgres): database.drop() -def clear_table(transaction): - transaction.execute_update("DELETE FROM contacts WHERE true") +class TestDbApi: + @staticmethod + def clear_table(transaction): + transaction.execute_update("DELETE FROM contacts WHERE true") + @pytest.fixture(scope="function") + def dbapi_database(self, raw_database): + raw_database.run_in_transaction(self.clear_table) -@pytest.fixture(scope="function") -def dbapi_database(raw_database): + yield raw_database - raw_database.run_in_transaction(clear_table) + raw_database.run_in_transaction(self.clear_table) - yield raw_database - - raw_database.run_in_transaction(clear_table) + @pytest.fixture(autouse=True) + def init_connection(self, request, shared_instance, dbapi_database): + if "noautofixt" not in request.keywords: + self._conn = Connection(shared_instance, dbapi_database) + self._cursor = self._conn.cursor() + yield + if "noautofixt" not in request.keywords: + self._cursor.close() + self._conn.close() + def _execute_common_statements(self, cursor): + # execute several DML statements within one transaction + cursor.execute( + """ + INSERT INTO contacts (contact_id, first_name, last_name, email) + VALUES (1, 'first-name', 'last-name', 'test.email@domen.ru') + """ + ) + cursor.execute( + """ + UPDATE contacts + SET first_name = 'updated-first-name' + WHERE first_name = 'first-name' + """ + ) + cursor.execute( + """ + UPDATE contacts + SET email = 'test.email_updated@domen.ru' + WHERE email = 'test.email@domen.ru' + """ + ) + return ( + 1, + "updated-first-name", + "last-name", + "test.email_updated@domen.ru", + ) -def test_commit(shared_instance, dbapi_database): - """Test committing a transaction with several statements.""" - want_row = ( - 1, - "updated-first-name", - "last-name", - "test.email_updated@domen.ru", - ) - # connect to the test database - conn = Connection(shared_instance, dbapi_database) - cursor = conn.cursor() + @pytest.mark.parametrize("client_side", [True, False]) + def test_commit(self, client_side): + """Test committing a transaction with several statements.""" + updated_row = self._execute_common_statements(self._cursor) + if client_side: + self._cursor.execute("""COMMIT""") + else: + self._conn.commit() + + # read the resulting data from the database + self._cursor.execute("SELECT * FROM contacts") + got_rows = self._cursor.fetchall() + self._conn.commit() + + assert got_rows == [updated_row] + + @pytest.mark.skip(reason="b/315807641") + def test_commit_exception(self): + """Test that if exception during commit method is caught, then + subsequent operations on same Cursor and Connection object works + properly.""" + self._execute_common_statements(self._cursor) + # deleting the session to fail the commit + self._conn._session.delete() + try: + self._conn.commit() + except Exception: + pass + + # Testing that the connection and Cursor are in proper state post commit + # and a new transaction is started + updated_row = self._execute_common_statements(self._cursor) 
+ self._cursor.execute("SELECT * FROM contacts") + got_rows = self._cursor.fetchall() + self._conn.commit() + + assert got_rows == [updated_row] + + @pytest.mark.skip(reason="b/315807641") + def test_rollback_exception(self): + """Test that if exception during rollback method is caught, then + subsequent operations on same Cursor and Connection object works + properly.""" + self._execute_common_statements(self._cursor) + # deleting the session to fail the rollback + self._conn._session.delete() + try: + self._conn.rollback() + except Exception: + pass + + # Testing that the connection and Cursor are in proper state post + # exception in rollback and a new transaction is started + updated_row = self._execute_common_statements(self._cursor) + self._cursor.execute("SELECT * FROM contacts") + got_rows = self._cursor.fetchall() + self._conn.commit() + + assert got_rows == [updated_row] + + @pytest.mark.skip(reason="b/315807641") + def test_cursor_execute_exception(self): + """Test that if exception in Cursor's execute method is caught when + Connection is not in autocommit mode, then subsequent operations on + same Cursor and Connection object works properly.""" + updated_row = self._execute_common_statements(self._cursor) + try: + self._cursor.execute("SELECT * FROM unknown_table") + except Exception: + pass + self._cursor.execute("SELECT * FROM contacts") + got_rows = self._cursor.fetchall() + self._conn.commit() + assert got_rows == [updated_row] + + # Testing that the connection and Cursor are in proper state post commit + # and a new transaction is started + self._cursor.execute("SELECT * FROM contacts") + got_rows = self._cursor.fetchall() + self._conn.commit() + assert got_rows == [updated_row] + + def test_cursor_execute_exception_autocommit(self): + """Test that if exception in Cursor's execute method is caught when + Connection is in autocommit mode, then subsequent operations on + same Cursor and Connection object works properly.""" + self._conn.autocommit = True + updated_row = self._execute_common_statements(self._cursor) + try: + self._cursor.execute("SELECT * FROM unknown_table") + except Exception: + pass + self._cursor.execute("SELECT * FROM contacts") + got_rows = self._cursor.fetchall() + assert got_rows == [updated_row] + + def test_cursor_execute_exception_begin_client_side(self): + """Test that if exception in Cursor's execute method is caught when + beginning a transaction using client side statement, then subsequent + operations on same Cursor and Connection object works properly.""" + self._conn.autocommit = True + self._cursor.execute("begin transaction") + updated_row = self._execute_common_statements(self._cursor) + try: + self._cursor.execute("SELECT * FROM unknown_table") + except Exception: + pass + self._cursor.execute("SELECT * FROM contacts") + got_rows = self._cursor.fetchall() + self._conn.commit() + assert got_rows == [updated_row] + + # Testing that the connection and Cursor are in proper state post commit + self._conn.autocommit = False + self._cursor.execute("SELECT * FROM contacts") + got_rows = self._cursor.fetchall() + self._conn.commit() + assert got_rows == [updated_row] + + @pytest.mark.noautofixt + def test_begin_client_side(self, shared_instance, dbapi_database): + """Test beginning a transaction using client side statement, + where connection is in autocommit mode.""" + + conn1 = Connection(shared_instance, dbapi_database) + conn1.autocommit = True + cursor1 = conn1.cursor() + cursor1.execute("begin transaction") + updated_row = 
self._execute_common_statements(cursor1) + + assert conn1._transaction_begin_marked is True + conn1.commit() + assert conn1._transaction_begin_marked is False + cursor1.close() + conn1.close() + + # As the connection conn1 is committed a new connection should see its results + conn3 = Connection(shared_instance, dbapi_database) + cursor3 = conn3.cursor() + cursor3.execute("SELECT * FROM contacts") + conn3.commit() + got_rows = cursor3.fetchall() + cursor3.close() + conn3.close() + assert got_rows == [updated_row] + + def test_begin_and_commit(self): + """Test beginning and then committing a transaction is a Noop""" + self._cursor.execute("begin transaction") + self._cursor.execute("commit transaction") + self._cursor.execute("SELECT * FROM contacts") + self._conn.commit() + assert self._cursor.fetchall() == [] + + def test_begin_and_rollback(self): + """Test beginning and then rolling back a transaction is a Noop""" + self._cursor.execute("begin transaction") + self._cursor.execute("rollback transaction") + self._cursor.execute("SELECT * FROM contacts") + self._conn.commit() + assert self._cursor.fetchall() == [] + + def test_read_and_commit_timestamps(self): + """Test COMMIT_TIMESTAMP is not available after read statement and + READ_TIMESTAMP is not available after write statement in autocommit + mode.""" + self._conn.autocommit = True + self._cursor.execute("SELECT * FROM contacts") + self._cursor.execute( + """ + INSERT INTO contacts (contact_id, first_name, last_name, email) + VALUES (1, 'first-name', 'last-name', 'test.email@domen.ru') + """ + ) - # execute several DML statements within one transaction - cursor.execute( - """ -INSERT INTO contacts (contact_id, first_name, last_name, email) -VALUES (1, 'first-name', 'last-name', 'test.email@domen.ru') - """ - ) - cursor.execute( - """ -UPDATE contacts -SET first_name = 'updated-first-name' -WHERE first_name = 'first-name' -""" - ) - cursor.execute( - """ -UPDATE contacts -SET email = 'test.email_updated@domen.ru' -WHERE email = 'test.email@domen.ru' -""" - ) - conn.commit() + self._cursor.execute("SHOW VARIABLE COMMIT_TIMESTAMP") + got_rows = self._cursor.fetchall() + assert len(got_rows) == 1 - # read the resulting data from the database - cursor.execute("SELECT * FROM contacts") - got_rows = cursor.fetchall() - conn.commit() + self._cursor.execute("SHOW VARIABLE READ_TIMESTAMP") + got_rows = self._cursor.fetchall() + assert len(got_rows) == 0 - assert got_rows == [want_row] + self._cursor.execute("SELECT * FROM contacts") - cursor.close() - conn.close() + self._cursor.execute("SHOW VARIABLE COMMIT_TIMESTAMP") + got_rows = self._cursor.fetchall() + assert len(got_rows) == 0 + self._cursor.execute("SHOW VARIABLE READ_TIMESTAMP") + got_rows = self._cursor.fetchall() + assert len(got_rows) == 1 -def test_rollback(shared_instance, dbapi_database): - """Test rollbacking a transaction with several statements.""" - want_row = (2, "first-name", "last-name", "test.email@domen.ru") - # connect to the test database - conn = Connection(shared_instance, dbapi_database) - cursor = conn.cursor() + def test_commit_timestamp_client_side_transaction(self): + """Test executing SHOW_COMMIT_TIMESTAMP client side statement in a + transaction.""" - cursor.execute( + self._cursor.execute( + """ + INSERT INTO contacts (contact_id, first_name, last_name, email) + VALUES (1, 'first-name', 'last-name', 'test.email@domen.ru') """ -INSERT INTO contacts (contact_id, first_name, last_name, email) -VALUES (2, 'first-name', 'last-name', 'test.email@domen.ru') - """ - 
) - conn.commit() + ) + self._cursor.execute("SHOW VARIABLE COMMIT_TIMESTAMP") + got_rows = self._cursor.fetchall() + # As the connection is not committed we will get 0 rows + assert len(got_rows) == 0 + assert len(self._cursor.description) == 1 - # execute several DMLs with one transaction - cursor.execute( - """ -UPDATE contacts -SET first_name = 'updated-first-name' -WHERE first_name = 'first-name' -""" - ) - cursor.execute( + self._cursor.execute( + """ + INSERT INTO contacts (contact_id, first_name, last_name, email) + VALUES (2, 'first-name', 'last-name', 'test.email@domen.ru') """ -UPDATE contacts -SET email = 'test.email_updated@domen.ru' -WHERE email = 'test.email@domen.ru' -""" - ) - conn.rollback() - - # read the resulting data from the database - cursor.execute("SELECT * FROM contacts") - got_rows = cursor.fetchall() - conn.commit() - - assert got_rows == [want_row] - - cursor.close() - conn.close() + ) + self._conn.commit() + self._cursor.execute("SHOW VARIABLE COMMIT_TIMESTAMP") + got_rows = self._cursor.fetchall() + assert len(got_rows) == 1 + assert len(got_rows[0]) == 1 + assert len(self._cursor.description) == 1 + assert self._cursor.description[0].name == "SHOW_COMMIT_TIMESTAMP" + assert isinstance(got_rows[0][0], DatetimeWithNanoseconds) -def test_autocommit_mode_change(shared_instance, dbapi_database): - """Test auto committing a transaction on `autocommit` mode change.""" - want_row = ( - 2, - "updated-first-name", - "last-name", - "test.email@domen.ru", - ) - # connect to the test database - conn = Connection(shared_instance, dbapi_database) - cursor = conn.cursor() + def test_commit_timestamp_client_side_autocommit(self): + """Test executing SHOW_COMMIT_TIMESTAMP client side statement in a + transaction when connection is in autocommit mode.""" - cursor.execute( - """ -INSERT INTO contacts (contact_id, first_name, last_name, email) -VALUES (2, 'first-name', 'last-name', 'test.email@domen.ru') - """ - ) - cursor.execute( + self._conn.autocommit = True + self._cursor.execute( + """ + INSERT INTO contacts (contact_id, first_name, last_name, email) + VALUES (2, 'first-name', 'last-name', 'test.email@domen.ru') """ -UPDATE contacts -SET first_name = 'updated-first-name' -WHERE first_name = 'first-name' -""" - ) - conn.autocommit = True + ) + self._cursor.execute("SHOW VARIABLE COMMIT_TIMESTAMP") - # read the resulting data from the database - cursor.execute("SELECT * FROM contacts") - got_rows = cursor.fetchall() + got_rows = self._cursor.fetchall() + assert len(got_rows) == 1 + assert len(got_rows[0]) == 1 + assert len(self._cursor.description) == 1 + assert self._cursor.description[0].name == "SHOW_COMMIT_TIMESTAMP" + assert isinstance(got_rows[0][0], DatetimeWithNanoseconds) - assert got_rows == [want_row] + def test_read_timestamp_client_side(self): + """Test executing SHOW_READ_TIMESTAMP client side statement in a + transaction.""" - cursor.close() - conn.close() + self._conn.read_only = True + self._cursor.execute("SELECT * FROM contacts") + assert self._cursor.fetchall() == [] + self._cursor.execute("SHOW VARIABLE READ_TIMESTAMP") + read_timestamp_query_result_1 = self._cursor.fetchall() -def test_rollback_on_connection_closing(shared_instance, dbapi_database): - """ - When closing a connection all the pending transactions - must be rollbacked. Testing if it's working this way. 
- """ - want_row = (1, "first-name", "last-name", "test.email@domen.ru") - # connect to the test database - conn = Connection(shared_instance, dbapi_database) - cursor = conn.cursor() + self._cursor.execute("SELECT * FROM contacts") + assert self._cursor.fetchall() == [] - cursor.execute( - """ -INSERT INTO contacts (contact_id, first_name, last_name, email) -VALUES (1, 'first-name', 'last-name', 'test.email@domen.ru') - """ - ) - conn.commit() - - cursor.execute( - """ -UPDATE contacts -SET first_name = 'updated-first-name' -WHERE first_name = 'first-name' -""" - ) - conn.close() + self._cursor.execute("SHOW VARIABLE READ_TIMESTAMP") + read_timestamp_query_result_2 = self._cursor.fetchall() - # connect again, as the previous connection is no-op after closing - conn = Connection(shared_instance, dbapi_database) - cursor = conn.cursor() + self._conn.commit() - # read the resulting data from the database - cursor.execute("SELECT * FROM contacts") - got_rows = cursor.fetchall() - conn.commit() + self._cursor.execute("SHOW VARIABLE READ_TIMESTAMP") + read_timestamp_query_result_3 = self._cursor.fetchall() + assert len(self._cursor.description) == 1 + assert self._cursor.description[0].name == "SHOW_READ_TIMESTAMP" - assert got_rows == [want_row] + assert ( + read_timestamp_query_result_1 + == read_timestamp_query_result_2 + == read_timestamp_query_result_3 + ) + assert len(read_timestamp_query_result_1) == 1 + assert len(read_timestamp_query_result_1[0]) == 1 + assert isinstance(read_timestamp_query_result_1[0][0], DatetimeWithNanoseconds) - cursor.close() - conn.close() + self._cursor.execute("SELECT * FROM contacts") + self._cursor.execute("SHOW VARIABLE READ_TIMESTAMP") + read_timestamp_query_result_4 = self._cursor.fetchall() + self._conn.commit() + assert read_timestamp_query_result_1 != read_timestamp_query_result_4 + def test_read_timestamp_client_side_autocommit(self): + """Test executing SHOW_READ_TIMESTAMP client side statement in a + transaction when connection is in autocommit mode.""" -def test_results_checksum(shared_instance, dbapi_database): - """Test that results checksum is calculated properly.""" - conn = Connection(shared_instance, dbapi_database) - cursor = conn.cursor() + self._conn.autocommit = True - cursor.execute( + self._cursor.execute( + """ + INSERT INTO contacts (contact_id, first_name, last_name, email) + VALUES (2, 'first-name', 'last-name', 'test.email@domen.ru') """ -INSERT INTO contacts (contact_id, first_name, last_name, email) -VALUES -(1, 'first-name', 'last-name', 'test.email@domen.ru'), -(2, 'first-name2', 'last-name2', 'test.email2@domen.ru') - """ - ) - assert len(conn._statements) == 1 - conn.commit() - - cursor.execute("SELECT * FROM contacts") - got_rows = cursor.fetchall() + ) + self._conn.read_only = True + self._cursor.execute("SELECT * FROM contacts") + assert self._cursor.fetchall() == [ + (2, "first-name", "last-name", "test.email@domen.ru") + ] + self._cursor.execute("SHOW VARIABLE READ_TIMESTAMP") + read_timestamp_query_result_1 = self._cursor.fetchall() + + assert len(read_timestamp_query_result_1) == 1 + assert len(read_timestamp_query_result_1[0]) == 1 + assert len(self._cursor.description) == 1 + assert self._cursor.description[0].name == "SHOW_READ_TIMESTAMP" + assert isinstance(read_timestamp_query_result_1[0][0], DatetimeWithNanoseconds) + + self._conn.read_only = False + self._insert_row(3) + + self._conn.read_only = True + self._cursor.execute("SELECT * FROM contacts") + self._cursor.execute("SHOW VARIABLE READ_TIMESTAMP") + 
read_timestamp_query_result_2 = self._cursor.fetchall() + assert read_timestamp_query_result_1 != read_timestamp_query_result_2 + + @pytest.mark.parametrize("auto_commit", [False, True]) + def test_batch_dml(self, auto_commit): + """Test batch dml.""" + + if auto_commit: + self._conn.autocommit = True + self._insert_row(1) + + self._cursor.execute("start batch dml") + self._insert_row(2) + self._insert_row(3) + self._cursor.execute("run batch") + + self._insert_row(4) + + # Test starting another dml batch in same transaction works + self._cursor.execute("start batch dml") + self._insert_row(5) + self._insert_row(6) + self._cursor.execute("run batch") + + if not auto_commit: + self._conn.commit() + + self._cursor.execute("SELECT * FROM contacts") + assert ( + self._cursor.fetchall().sort() + == ( + [ + (1, "first-name-1", "last-name-1", "test.email@domen.ru"), + (2, "first-name-2", "last-name-2", "test.email@domen.ru"), + (3, "first-name-3", "last-name-3", "test.email@domen.ru"), + (4, "first-name-4", "last-name-4", "test.email@domen.ru"), + (5, "first-name-5", "last-name-5", "test.email@domen.ru"), + (6, "first-name-6", "last-name-6", "test.email@domen.ru"), + ] + ).sort() + ) - assert len(conn._statements) == 1 - conn.commit() + # Test starting another dml batch in same connection post commit works + self._cursor.execute("start batch dml") + self._insert_row(7) + self._insert_row(8) + self._cursor.execute("run batch") - checksum = hashlib.sha256() - checksum.update(pickle.dumps(got_rows[0])) - checksum.update(pickle.dumps(got_rows[1])) + self._insert_row(9) - assert cursor._checksum.checksum.digest() == checksum.digest() + if not auto_commit: + self._conn.commit() + self._cursor.execute("SELECT * FROM contacts") + assert len(self._cursor.fetchall()) == 9 -def test_execute_many(shared_instance, dbapi_database): - # connect to the test database - conn = Connection(shared_instance, dbapi_database) - cursor = conn.cursor() + def test_abort_batch_dml(self): + """Test abort batch dml.""" - row_data = [ - (1, "first-name", "last-name", "test.email@example.com"), - (2, "first-name2", "last-name2", "test.email2@example.com"), - ] - cursor.executemany( - """ -INSERT INTO contacts (contact_id, first_name, last_name, email) -VALUES (%s, %s, %s, %s) - """, - row_data, - ) - conn.commit() + self._cursor.execute("start batch dml") + self._insert_row(1) + self._insert_row(2) + self._cursor.execute("abort batch") - cursor.executemany( - """SELECT * FROM contacts WHERE contact_id = %s""", - ((1,), (2,)), - ) - res = cursor.fetchall() - conn.commit() + self._insert_row(3) + self._conn.commit() - assert len(res) == len(row_data) - for found, expected in zip(res, row_data): - assert found[0] == expected[0] + self._cursor.execute("SELECT * FROM contacts") + got_rows = self._cursor.fetchall() + assert len(got_rows) == 1 + assert got_rows == [(3, "first-name-3", "last-name-3", "test.email@domen.ru")] - # checking that execute() and executemany() - # results are not mixed together - cursor.execute( - """ -SELECT * FROM contacts WHERE contact_id = 1 -""", - ) - res = cursor.fetchone() - conn.commit() + def test_batch_dml_invalid_statements(self): + """Test batch dml having invalid statements.""" - assert res[0] == 1 - conn.close() + # Test first statement in batch is invalid + self._cursor.execute("start batch dml") + self._cursor.execute( + """ + INSERT INTO unknown_table (contact_id, first_name, last_name, email) + VALUES (2, 'first-name', 'last-name', 'test.email@domen.ru') + """ + ) + self._insert_row(1) + 
self._insert_row(2) + with pytest.raises(OperationalError): + self._cursor.execute("run batch") + + # Test middle statement in batch is invalid + self._cursor.execute("start batch dml") + self._insert_row(1) + self._cursor.execute( + """ + INSERT INTO unknown_table (contact_id, first_name, last_name, email) + VALUES (2, 'first-name', 'last-name', 'test.email@domen.ru') + """ + ) + self._insert_row(2) + with pytest.raises(OperationalError): + self._cursor.execute("run batch") + + # Test last statement in batch is invalid + self._cursor.execute("start batch dml") + self._insert_row(1) + self._insert_row(2) + self._cursor.execute( + """ + INSERT INTO unknown_table (contact_id, first_name, last_name, email) + VALUES (2, 'first-name', 'last-name', 'test.email@domen.ru') + """ + ) + with pytest.raises(OperationalError): + self._cursor.execute("run batch") + + def test_partitioned_query(self): + """Test partition query works in read-only mode.""" + self._cursor.execute("start batch dml") + for i in range(1, 11): + self._insert_row(i) + self._cursor.execute("run batch") + self._conn.commit() + + self._conn.read_only = True + self._cursor.execute("PARTITION SELECT * FROM contacts") + partition_id_rows = self._cursor.fetchall() + assert len(partition_id_rows) > 0 + + rows = [] + for partition_id_row in partition_id_rows: + self._cursor.execute("RUN PARTITION " + partition_id_row[0]) + rows = rows + self._cursor.fetchall() + assert len(rows) == 10 + self._conn.commit() + + def test_partitioned_query_in_rw_transaction(self): + """Test partition query throws exception when connection is not in + read-only mode and neither in auto-commit mode.""" + + with pytest.raises(ProgrammingError): + self._cursor.execute("PARTITION SELECT * FROM contacts") + + def test_partitioned_query_with_dml_query(self): + """Test partition query throws exception when sql query is a DML query.""" + + self._conn.read_only = True + with pytest.raises(ProgrammingError): + self._cursor.execute( + """ + PARTITION INSERT INTO contacts (contact_id, first_name, last_name, email) + VALUES (1111, 'first-name', 'last-name', 'test.email@domen.ru') + """ + ) + + def test_partitioned_query_in_autocommit_mode(self): + """Test partition query works when connection is not in read-only mode + but is in auto-commit mode.""" + self._cursor.execute("start batch dml") + for i in range(1, 11): + self._insert_row(i) + self._cursor.execute("run batch") + self._conn.commit() + + self._conn.autocommit = True + self._cursor.execute("PARTITION SELECT * FROM contacts") + partition_id_rows = self._cursor.fetchall() + assert len(partition_id_rows) > 0 + + rows = [] + for partition_id_row in partition_id_rows: + self._cursor.execute("RUN PARTITION " + partition_id_row[0]) + rows = rows + self._cursor.fetchall() + assert len(rows) == 10 + + def test_partitioned_query_with_client_transaction_started(self): + """Test partition query throws exception when connection is not in + read-only mode and transaction started using client side statement.""" + + self._conn.autocommit = True + self._cursor.execute("begin transaction") + with pytest.raises(ProgrammingError): + self._cursor.execute("PARTITION SELECT * FROM contacts") + + def _insert_row(self, i): + self._cursor.execute( + f""" + INSERT INTO contacts (contact_id, first_name, last_name, email) + VALUES ({i}, 'first-name-{i}', 'last-name-{i}', 'test.email@domen.ru') + """ + ) + def test_begin_success_post_commit(self): + """Test beginning a new transaction post commiting an existing transaction + is possible on a 
connection, when connection is in autocommit mode.""" + want_row = (2, "first-name", "last-name", "test.email@domen.ru") + self._conn.autocommit = True + self._cursor.execute("begin transaction") + self._cursor.execute( + """ + INSERT INTO contacts (contact_id, first_name, last_name, email) + VALUES (2, 'first-name', 'last-name', 'test.email@domen.ru') + """ + ) + self._conn.commit() + + self._cursor.execute("begin transaction") + self._cursor.execute("SELECT * FROM contacts") + got_rows = self._cursor.fetchall() + self._conn.commit() + assert got_rows == [want_row] + + def test_begin_error_before_commit(self): + """Test beginning a new transaction before commiting an existing transaction is not possible on a connection, when connection is in autocommit mode.""" + self._conn.autocommit = True + self._cursor.execute("begin transaction") + self._cursor.execute( + """ + INSERT INTO contacts (contact_id, first_name, last_name, email) + VALUES (2, 'first-name', 'last-name', 'test.email@domen.ru') + """ + ) -def test_DDL_autocommit(shared_instance, dbapi_database): - """Check that DDLs in autocommit mode are immediately executed.""" + with pytest.raises(OperationalError): + self._cursor.execute("begin transaction") - try: - conn = Connection(shared_instance, dbapi_database) - conn.autocommit = True + @pytest.mark.parametrize("client_side", [False, True]) + def test_rollback(self, client_side): + """Test rollbacking a transaction with several statements.""" + want_row = (2, "first-name", "last-name", "test.email@domen.ru") - cur = conn.cursor() - cur.execute( + self._cursor.execute( """ - CREATE TABLE Singers ( - SingerId INT64 NOT NULL, - Name STRING(1024), - ) PRIMARY KEY (SingerId) + INSERT INTO contacts (contact_id, first_name, last_name, email) + VALUES (2, 'first-name', 'last-name', 'test.email@domen.ru') """ ) - conn.close() - - # if previous DDL wasn't committed, the next DROP TABLE - # statement will fail with a ProgrammingError - conn = Connection(shared_instance, dbapi_database) - cur = conn.cursor() - - cur.execute("DROP TABLE Singers") - conn.commit() - finally: - # Delete table - table = dbapi_database.table("Singers") - if table.exists(): - op = dbapi_database.update_ddl(["DROP TABLE Singers"]) - op.result() - + self._conn.commit() -@pytest.mark.skipif(_helpers.USE_EMULATOR, reason="Emulator does not support json.") -def test_autocommit_with_json_data(shared_instance, dbapi_database): - """ - Check that DDLs in autocommit mode are immediately - executed for json fields. 
+ # execute several DMLs with one transaction + self._cursor.execute( + """ + UPDATE contacts + SET first_name = 'updated-first-name' + WHERE first_name = 'first-name' """ - try: - # Create table - conn = Connection(shared_instance, dbapi_database) - conn.autocommit = True - - cur = conn.cursor() - cur.execute( + ) + self._cursor.execute( """ - CREATE TABLE JsonDetails ( - DataId INT64 NOT NULL, - Details JSON, - ) PRIMARY KEY (DataId) - """ + UPDATE contacts + SET email = 'test.email_updated@domen.ru' + WHERE email = 'test.email@domen.ru' + """ ) - # Insert data to table - cur.execute( - sql="INSERT INTO JsonDetails (DataId, Details) VALUES (%s, %s)", - args=(123, JsonObject({"name": "Jakob", "age": "26"})), + if client_side: + self._cursor.execute("ROLLBACK") + else: + self._conn.rollback() + + # read the resulting data from the database + self._cursor.execute("SELECT * FROM contacts") + got_rows = self._cursor.fetchall() + self._conn.commit() + + assert got_rows == [want_row] + + def test_autocommit_mode_change(self): + """Test auto committing a transaction on `autocommit` mode change.""" + want_row = ( + 2, + "updated-first-name", + "last-name", + "test.email@domen.ru", ) - # Read back the data. - cur.execute("""select * from JsonDetails;""") - got_rows = cur.fetchall() - - # Assert the response - assert len(got_rows) == 1 - assert got_rows[0][0] == 123 - assert got_rows[0][1] == {"age": "26", "name": "Jakob"} - - # Drop the table - cur.execute("DROP TABLE JsonDetails") - conn.commit() - conn.close() - finally: - # Delete table - table = dbapi_database.table("JsonDetails") - if table.exists(): - op = dbapi_database.update_ddl(["DROP TABLE JsonDetails"]) - op.result() - - -@pytest.mark.skipif(_helpers.USE_EMULATOR, reason="Emulator does not support json.") -def test_json_array(shared_instance, dbapi_database): - try: - # Create table - conn = Connection(shared_instance, dbapi_database) - conn.autocommit = True - - cur = conn.cursor() - cur.execute( + self._cursor.execute( """ - CREATE TABLE JsonDetails ( - DataId INT64 NOT NULL, - Details JSON, - ) PRIMARY KEY (DataId) + INSERT INTO contacts (contact_id, first_name, last_name, email) + VALUES (2, 'first-name', 'last-name', 'test.email@domen.ru') """ ) - cur.execute( - "INSERT INTO JsonDetails (DataId, Details) VALUES (%s, %s)", - [1, JsonObject([1, 2, 3])], + self._cursor.execute( + """ + UPDATE contacts + SET first_name = 'updated-first-name' + WHERE first_name = 'first-name' + """ ) + self._conn.autocommit = True - cur.execute("SELECT * FROM JsonDetails WHERE DataId = 1") - row = cur.fetchone() - assert isinstance(row[1], JsonObject) - assert row[1].serialize() == "[1,2,3]" + # read the resulting data from the database + self._cursor.execute("SELECT * FROM contacts") + got_rows = self._cursor.fetchall() - cur.execute("DROP TABLE JsonDetails") - conn.close() - finally: - # Delete table - table = dbapi_database.table("JsonDetails") - if table.exists(): - op = dbapi_database.update_ddl(["DROP TABLE JsonDetails"]) - op.result() + assert got_rows == [want_row] - -def test_DDL_commit(shared_instance, dbapi_database): - """Check that DDLs in commit mode are executed on calling `commit()`.""" - try: + @pytest.mark.noautofixt + def test_rollback_on_connection_closing(self, shared_instance, dbapi_database): + """ + When closing a connection all the pending transactions + must be rollbacked. Testing if it's working this way. 
+ """ + want_row = (1, "first-name", "last-name", "test.email@domen.ru") + # connect to the test database conn = Connection(shared_instance, dbapi_database) - cur = conn.cursor() + cursor = conn.cursor() - cur.execute( + cursor.execute( """ - CREATE TABLE Singers ( - SingerId INT64 NOT NULL, - Name STRING(1024), - ) PRIMARY KEY (SingerId) + INSERT INTO contacts (contact_id, first_name, last_name, email) + VALUES (1, 'first-name', 'last-name', 'test.email@domen.ru') """ ) conn.commit() + + cursor.execute( + """ + UPDATE contacts + SET first_name = 'updated-first-name' + WHERE first_name = 'first-name' + """ + ) conn.close() - # if previous DDL wasn't committed, the next DROP TABLE - # statement will fail with a ProgrammingError + # connect again, as the previous connection is no-op after closing conn = Connection(shared_instance, dbapi_database) - cur = conn.cursor() + cursor = conn.cursor() - cur.execute("DROP TABLE Singers") + # read the resulting data from the database + cursor.execute("SELECT * FROM contacts") + got_rows = cursor.fetchall() conn.commit() - finally: - # Delete table - table = dbapi_database.table("Singers") - if table.exists(): - op = dbapi_database.update_ddl(["DROP TABLE Singers"]) - op.result() - - -def test_ping(shared_instance, dbapi_database): - """Check connection validation method.""" - conn = Connection(shared_instance, dbapi_database) - conn.validate() - conn.close() - - -def test_user_agent(shared_instance, dbapi_database): - """Check that DB API uses an appropriate user agent.""" - conn = connect(shared_instance.name, dbapi_database.name) - assert ( - conn.instance._client._client_info.user_agent - == "gl-dbapi/" + package_version.__version__ - ) - assert ( - conn.instance._client._client_info.client_library_version - == package_version.__version__ - ) + assert got_rows == [want_row] -def test_read_only(shared_instance, dbapi_database): - """ - Check that connection set to `read_only=True` uses - ReadOnly transactions. 
- """ - conn = Connection(shared_instance, dbapi_database, read_only=True) - cur = conn.cursor() + cursor.close() + conn.close() - with pytest.raises(ProgrammingError): - cur.execute( + def test_results_checksum(self): + """Test that results checksum is calculated properly.""" + + self._cursor.execute( """ -UPDATE contacts -SET first_name = 'updated-first-name' -WHERE first_name = 'first-name' -""" + INSERT INTO contacts (contact_id, first_name, last_name, email) + VALUES + (1, 'first-name', 'last-name', 'test.email@domen.ru'), + (2, 'first-name2', 'last-name2', 'test.email2@domen.ru') + """ ) + assert len(self._conn._statements) == 1 + self._conn.commit() - cur.execute("SELECT * FROM contacts") - conn.commit() + self._cursor.execute("SELECT * FROM contacts") + got_rows = self._cursor.fetchall() + assert len(self._conn._statements) == 1 + self._conn.commit() -def test_staleness(shared_instance, dbapi_database): - """Check the DB API `staleness` option.""" - conn = Connection(shared_instance, dbapi_database) - cursor = conn.cursor() + checksum = hashlib.sha256() + checksum.update(pickle.dumps(got_rows[0])) + checksum.update(pickle.dumps(got_rows[1])) - before_insert = datetime.datetime.utcnow().replace(tzinfo=UTC) - time.sleep(0.25) + assert self._cursor._checksum.checksum.digest() == checksum.digest() - cursor.execute( - """ -INSERT INTO contacts (contact_id, first_name, last_name, email) -VALUES (1, 'first-name', 'last-name', 'test.email@example.com') - """ - ) - conn.commit() - - conn.read_only = True - conn.staleness = {"read_timestamp": before_insert} - cursor.execute("SELECT * FROM contacts") - conn.commit() - assert len(cursor.fetchall()) == 0 - - conn.staleness = None - cursor.execute("SELECT * FROM contacts") - conn.commit() - assert len(cursor.fetchall()) == 1 + def test_execute_many(self): + row_data = [ + (1, "first-name", "last-name", "test.email@example.com"), + (2, "first-name2", "last-name2", "test.email2@example.com"), + ] + self._cursor.executemany( + """ + INSERT INTO contacts (contact_id, first_name, last_name, email) + VALUES (%s, %s, %s, %s) + """, + row_data, + ) + self._conn.commit() - conn.close() + self._cursor.executemany( + """SELECT * FROM contacts WHERE contact_id = %s""", + ((1,), (2,)), + ) + res = self._cursor.fetchall() + self._conn.commit() + assert len(res) == len(row_data) + for found, expected in zip(res, row_data): + assert found[0] == expected[0] -@pytest.mark.parametrize("autocommit", [False, True]) -def test_rowcount(shared_instance, dbapi_database, autocommit): - try: - conn = Connection(shared_instance, dbapi_database) - conn.autocommit = autocommit - cur = conn.cursor() - - cur.execute( + # checking that execute() and executemany() + # results are not mixed together + self._cursor.execute( """ - CREATE TABLE Singers ( - SingerId INT64 NOT NULL, - Name STRING(1024), - ) PRIMARY KEY (SingerId) - """ + SELECT * FROM contacts WHERE contact_id = 1 + """, ) - conn.commit() - - # executemany sets rowcount to the total modified rows - rows = [(i, f"Singer {i}") for i in range(100)] - cur.executemany( - "INSERT INTO Singers (SingerId, Name) VALUES (%s, %s)", rows[:98] + res = self._cursor.fetchone() + self._conn.commit() + + assert res[0] == 1 + + @pytest.mark.noautofixt + def test_DDL_autocommit(self, shared_instance, dbapi_database): + """Check that DDLs in autocommit mode are immediately executed.""" + + try: + conn = Connection(shared_instance, dbapi_database) + conn.autocommit = True + + cur = conn.cursor() + cur.execute( + """ + CREATE TABLE Singers 
( + SingerId INT64 NOT NULL, + Name STRING(1024), + ) PRIMARY KEY (SingerId) + """ + ) + conn.close() + + # if previous DDL wasn't committed, the next DROP TABLE + # statement will fail with a ProgrammingError + conn = Connection(shared_instance, dbapi_database) + cur = conn.cursor() + + cur.execute("DROP TABLE Singers") + conn.commit() + finally: + # Delete table + table = dbapi_database.table("Singers") + if table.exists(): + op = dbapi_database.update_ddl(["DROP TABLE Singers"]) + op.result() + + def test_ddl_execute_autocommit_true(self, dbapi_database): + """Check that DDL statement in autocommit mode results in successful + DDL statement execution for execute method.""" + + self._conn.autocommit = True + self._cursor.execute( + """ + CREATE TABLE DdlExecuteAutocommit ( + SingerId INT64 NOT NULL, + Name STRING(1024), + ) PRIMARY KEY (SingerId) + """ + ) + table = dbapi_database.table("DdlExecuteAutocommit") + assert table.exists() is True + + def test_ddl_executemany_autocommit_true(self, dbapi_database): + """Check that DDL statement in autocommit mode results in exception for + executemany method .""" + + self._conn.autocommit = True + with pytest.raises(ProgrammingError): + self._cursor.executemany( + """ + CREATE TABLE DdlExecuteManyAutocommit ( + SingerId INT64 NOT NULL, + Name STRING(1024), + ) PRIMARY KEY (SingerId) + """, + [], + ) + table = dbapi_database.table("DdlExecuteManyAutocommit") + assert table.exists() is False + + def test_ddl_executemany_autocommit_false(self, dbapi_database): + """Check that DDL statement in non-autocommit mode results in exception for + executemany method .""" + with pytest.raises(ProgrammingError): + self._cursor.executemany( + """ + CREATE TABLE DdlExecuteManyAutocommit ( + SingerId INT64 NOT NULL, + Name STRING(1024), + ) PRIMARY KEY (SingerId) + """, + [], + ) + table = dbapi_database.table("DdlExecuteManyAutocommit") + assert table.exists() is False + + def test_ddl_execute(self, dbapi_database): + """Check that DDL statement followed by non-DDL execute statement in + non autocommit mode results in successful DDL statement execution.""" + + want_row = ( + 1, + "first-name", + ) + self._cursor.execute( + """ + CREATE TABLE DdlExecute ( + SingerId INT64 NOT NULL, + Name STRING(1024), + ) PRIMARY KEY (SingerId) + """ ) - assert cur.rowcount == 98 + table = dbapi_database.table("DdlExecute") + assert table.exists() is False - # execute with INSERT - cur.execute( - "INSERT INTO Singers (SingerId, Name) VALUES (%s, %s), (%s, %s)", - [x for row in rows[98:] for x in row], + self._cursor.execute( + """ + INSERT INTO DdlExecute (SingerId, Name) + VALUES (1, "first-name") + """ ) - assert cur.rowcount == 2 + assert table.exists() is True + self._conn.commit() - # execute with UPDATE - cur.execute("UPDATE Singers SET Name = 'Cher' WHERE SingerId < 25") - assert cur.rowcount == 25 + # read the resulting data from the database + self._cursor.execute("SELECT * FROM DdlExecute") + got_rows = self._cursor.fetchall() - # execute with SELECT - cur.execute("SELECT Name FROM Singers WHERE SingerId < 75") - assert len(cur.fetchall()) == 75 - # rowcount is not available for SELECT - assert cur.rowcount == -1 + assert got_rows == [want_row] + + def test_ddl_executemany(self, dbapi_database): + """Check that DDL statement followed by non-DDL executemany statement in + non autocommit mode results in successful DDL statement execution.""" + + want_row = ( + 1, + "first-name", + ) + self._cursor.execute( + """ + CREATE TABLE DdlExecuteMany ( + SingerId INT64 NOT 
NULL, + Name STRING(1024), + ) PRIMARY KEY (SingerId) + """ + ) + table = dbapi_database.table("DdlExecuteMany") + assert table.exists() is False - # execute with DELETE - cur.execute("DELETE FROM Singers") - assert cur.rowcount == 100 + self._cursor.executemany( + """ + INSERT INTO DdlExecuteMany (SingerId, Name) + VALUES (%s, %s) + """, + [want_row], + ) + assert table.exists() is True + self._conn.commit() - # execute with UPDATE matching 0 rows - cur.execute("UPDATE Singers SET Name = 'Cher' WHERE SingerId < 25") - assert cur.rowcount == 0 + # read the resulting data from the database + self._cursor.execute("SELECT * FROM DdlExecuteMany") + got_rows = self._cursor.fetchall() - conn.commit() - cur.execute("DROP TABLE Singers") - conn.commit() - finally: - # Delete table - table = dbapi_database.table("Singers") - if table.exists(): - op = dbapi_database.update_ddl(["DROP TABLE Singers"]) - op.result() + assert got_rows == [want_row] + @pytest.mark.skipif(_helpers.USE_EMULATOR, reason="Emulator does not support json.") + def test_autocommit_with_json_data(self, dbapi_database): + """ + Check that DDLs in autocommit mode are immediately + executed for json fields. + """ + try: + self._conn.autocommit = True + self._cursor.execute( + """ + CREATE TABLE JsonDetails ( + DataId INT64 NOT NULL, + Details JSON, + ) PRIMARY KEY (DataId) + """ + ) + + # Insert data to table + self._cursor.execute( + sql="INSERT INTO JsonDetails (DataId, Details) VALUES (%s, %s)", + args=(123, JsonObject({"name": "Jakob", "age": "26"})), + ) + + # Read back the data. + self._cursor.execute("""select * from JsonDetails;""") + got_rows = self._cursor.fetchall() + + # Assert the response + assert len(got_rows) == 1 + assert got_rows[0][0] == 123 + assert got_rows[0][1] == {"age": "26", "name": "Jakob"} + + # Drop the table + self._cursor.execute("DROP TABLE JsonDetails") + self._conn.commit() + finally: + # Delete table + table = dbapi_database.table("JsonDetails") + if table.exists(): + op = dbapi_database.update_ddl(["DROP TABLE JsonDetails"]) + op.result() + + @pytest.mark.skipif(_helpers.USE_EMULATOR, reason="Emulator does not support json.") + def test_json_array(self, dbapi_database): + try: + # Create table + self._conn.autocommit = True + + self._cursor.execute( + """ + CREATE TABLE JsonDetails ( + DataId INT64 NOT NULL, + Details JSON, + ) PRIMARY KEY (DataId) + """ + ) + self._cursor.execute( + "INSERT INTO JsonDetails (DataId, Details) VALUES (%s, %s)", + [1, JsonObject([1, 2, 3])], + ) + + self._cursor.execute("SELECT * FROM JsonDetails WHERE DataId = 1") + row = self._cursor.fetchone() + assert isinstance(row[1], JsonObject) + assert row[1].serialize() == "[1,2,3]" + + self._cursor.execute("DROP TABLE JsonDetails") + finally: + # Delete table + table = dbapi_database.table("JsonDetails") + if table.exists(): + op = dbapi_database.update_ddl(["DROP TABLE JsonDetails"]) + op.result() + + @pytest.mark.noautofixt + def test_DDL_commit(self, shared_instance, dbapi_database): + """Check that DDLs in commit mode are executed on calling `commit()`.""" + try: + conn = Connection(shared_instance, dbapi_database) + cur = conn.cursor() + + cur.execute( + """ + CREATE TABLE Singers ( + SingerId INT64 NOT NULL, + Name STRING(1024), + ) PRIMARY KEY (SingerId) + """ + ) + conn.commit() + conn.close() + + # if previous DDL wasn't committed, the next DROP TABLE + # statement will fail with a ProgrammingError + conn = Connection(shared_instance, dbapi_database) + cur = conn.cursor() + + cur.execute("DROP TABLE Singers") + 
conn.commit() + finally: + # Delete table + table = dbapi_database.table("Singers") + if table.exists(): + op = dbapi_database.update_ddl(["DROP TABLE Singers"]) + op.result() + + def test_ping(self): + """Check connection validation method.""" + self._conn.validate() + + @pytest.mark.noautofixt + def test_user_agent(self, shared_instance, dbapi_database): + """Check that DB API uses an appropriate user agent.""" + conn = connect(shared_instance.name, dbapi_database.name) + assert ( + conn.instance._client._client_info.user_agent + == "gl-dbapi/" + package_version.__version__ + ) + assert ( + conn.instance._client._client_info.client_library_version + == package_version.__version__ + ) -@pytest.mark.parametrize("autocommit", [False, True]) -@pytest.mark.skipif( - _helpers.USE_EMULATOR, reason="Emulator does not support DML Returning." -) -def test_dml_returning_insert(shared_instance, dbapi_database, autocommit): - conn = Connection(shared_instance, dbapi_database) - conn.autocommit = autocommit - cur = conn.cursor() - cur.execute( + def test_read_only(self): + """ + Check that connection set to `read_only=True` uses + ReadOnly transactions. """ -INSERT INTO contacts (contact_id, first_name, last_name, email) -VALUES (1, 'first-name', 'last-name', 'test.email@example.com') -THEN RETURN contact_id, first_name - """ - ) - assert cur.fetchone() == (1, "first-name") - assert cur.rowcount == 1 - conn.commit() + self._conn.read_only = True + self._cursor.execute("SELECT * FROM contacts") + assert self._cursor.fetchall() == [] + self._conn.commit() -@pytest.mark.parametrize("autocommit", [False, True]) -@pytest.mark.skipif( - _helpers.USE_EMULATOR, reason="Emulator does not support DML Returning." -) -def test_dml_returning_update(shared_instance, dbapi_database, autocommit): - conn = Connection(shared_instance, dbapi_database) - conn.autocommit = autocommit - cur = conn.cursor() - cur.execute( + def test_read_only_dml(self): """ -INSERT INTO contacts (contact_id, first_name, last_name, email) -VALUES (1, 'first-name', 'last-name', 'test.email@example.com') - """ - ) - assert cur.rowcount == 1 - cur.execute( + Check that connection set to `read_only=True` leads to exception when + executing dml statements. """ -UPDATE contacts SET first_name = 'new-name' WHERE contact_id = 1 -THEN RETURN contact_id, first_name + + self._conn.read_only = True + with pytest.raises(ProgrammingError): + self._cursor.execute( + """ + UPDATE contacts + SET first_name = 'updated-first-name' + WHERE first_name = 'first-name' """ - ) - assert cur.fetchone() == (1, "new-name") - assert cur.rowcount == 1 - conn.commit() + ) + def test_staleness(self): + """Check the DB API `staleness` option.""" -@pytest.mark.parametrize("autocommit", [False, True]) -@pytest.mark.skipif( - _helpers.USE_EMULATOR, reason="Emulator does not support DML Returning." 
-) -def test_dml_returning_delete(shared_instance, dbapi_database, autocommit): - conn = Connection(shared_instance, dbapi_database) - conn.autocommit = autocommit - cur = conn.cursor() - cur.execute( + before_insert = datetime.datetime.utcnow().replace(tzinfo=UTC) + time.sleep(0.25) + + self._cursor.execute( + """ + INSERT INTO contacts (contact_id, first_name, last_name, email) + VALUES (1, 'first-name', 'last-name', 'test.email@example.com') """ -INSERT INTO contacts (contact_id, first_name, last_name, email) -VALUES (1, 'first-name', 'last-name', 'test.email@example.com') - """ + ) + self._conn.commit() + + self._conn.read_only = True + self._conn.staleness = {"read_timestamp": before_insert} + self._cursor.execute("SELECT * FROM contacts") + self._conn.commit() + assert len(self._cursor.fetchall()) == 0 + + self._conn.staleness = None + self._cursor.execute("SELECT * FROM contacts") + self._conn.commit() + assert len(self._cursor.fetchall()) == 1 + + @pytest.mark.parametrize("autocommit", [False, True]) + def test_rowcount(self, dbapi_database, autocommit): + try: + self._conn.autocommit = autocommit + + self._cursor.execute( + """ + CREATE TABLE Singers ( + SingerId INT64 NOT NULL, + Name STRING(1024), + ) PRIMARY KEY (SingerId) + """ + ) + self._conn.commit() + + # executemany sets rowcount to the total modified rows + rows = [(i, f"Singer {i}") for i in range(100)] + self._cursor.executemany( + "INSERT INTO Singers (SingerId, Name) VALUES (%s, %s)", rows[:98] + ) + assert self._cursor.rowcount == 98 + + # execute with INSERT + self._cursor.execute( + "INSERT INTO Singers (SingerId, Name) VALUES (%s, %s), (%s, %s)", + [x for row in rows[98:] for x in row], + ) + assert self._cursor.rowcount == 2 + + # execute with UPDATE + self._cursor.execute("UPDATE Singers SET Name = 'Cher' WHERE SingerId < 25") + assert self._cursor.rowcount == 25 + + # execute with SELECT + self._cursor.execute("SELECT Name FROM Singers WHERE SingerId < 75") + assert len(self._cursor.fetchall()) == 75 + # rowcount is not available for SELECT + assert self._cursor.rowcount == -1 + + # execute with DELETE + self._cursor.execute("DELETE FROM Singers") + assert self._cursor.rowcount == 100 + + # execute with UPDATE matching 0 rows + self._cursor.execute("UPDATE Singers SET Name = 'Cher' WHERE SingerId < 25") + assert self._cursor.rowcount == 0 + + self._conn.commit() + self._cursor.execute("DROP TABLE Singers") + self._conn.commit() + finally: + # Delete table + table = dbapi_database.table("Singers") + if table.exists(): + op = dbapi_database.update_ddl(["DROP TABLE Singers"]) + op.result() + + @pytest.mark.parametrize("autocommit", [False, True]) + @pytest.mark.skipif( + _helpers.USE_EMULATOR, reason="Emulator does not support DML Returning." ) - assert cur.rowcount == 1 - cur.execute( + def test_dml_returning_insert(self, autocommit): + self._conn.autocommit = autocommit + self._cursor.execute( + """ + INSERT INTO contacts (contact_id, first_name, last_name, email) + VALUES (1, 'first-name', 'last-name', 'test.email@example.com') + THEN RETURN contact_id, first_name """ -DELETE FROM contacts WHERE contact_id = 1 -THEN RETURN contact_id, first_name - """ + ) + assert self._cursor.fetchone() == (1, "first-name") + assert self._cursor.rowcount == 1 + self._conn.commit() + + @pytest.mark.parametrize("autocommit", [False, True]) + @pytest.mark.skipif( + _helpers.USE_EMULATOR, reason="Emulator does not support DML Returning." 
+ ) + def test_dml_returning_update(self, autocommit): + self._conn.autocommit = autocommit + self._cursor.execute( + """ + INSERT INTO contacts (contact_id, first_name, last_name, email) + VALUES (1, 'first-name', 'last-name', 'test.email@example.com') + """ + ) + assert self._cursor.rowcount == 1 + self._cursor.execute( + """ + UPDATE contacts SET first_name = 'new-name' WHERE contact_id = 1 + THEN RETURN contact_id, first_name + """ + ) + assert self._cursor.fetchone() == (1, "new-name") + assert self._cursor.rowcount == 1 + self._conn.commit() + + @pytest.mark.parametrize("autocommit", [False, True]) + @pytest.mark.skipif( + _helpers.USE_EMULATOR, reason="Emulator does not support DML Returning." ) - assert cur.fetchone() == (1, "first-name") - assert cur.rowcount == 1 - conn.commit() + def test_dml_returning_delete(self, autocommit): + self._conn.autocommit = autocommit + self._cursor.execute( + """ + INSERT INTO contacts (contact_id, first_name, last_name, email) + VALUES (1, 'first-name', 'last-name', 'test.email@example.com') + """ + ) + assert self._cursor.rowcount == 1 + self._cursor.execute( + """ + DELETE FROM contacts WHERE contact_id = 1 + THEN RETURN contact_id, first_name + """ + ) + assert self._cursor.fetchone() == (1, "first-name") + assert self._cursor.rowcount == 1 + self._conn.commit() diff --git a/tests/system/test_session_api.py b/tests/system/test_session_api.py index 7d58324b04..30981322cc 100644 --- a/tests/system/test_session_api.py +++ b/tests/system/test_session_api.py @@ -306,7 +306,6 @@ def assert_span_attributes( def _make_attributes(db_instance, **kwargs): - attributes = { "db.type": "spanner", "db.url": "spanner.googleapis.com", @@ -1099,7 +1098,6 @@ def test_transaction_batch_update_w_parent_span( ) def unit_of_work(transaction): - status, row_counts = transaction.batch_update( [insert_statement, update_statement, delete_statement] ) @@ -1303,7 +1301,6 @@ def _row_data(max_index): def _set_up_table(database, row_count): - sd = _sample_data def _unit_of_work(transaction): @@ -1430,7 +1427,6 @@ def test_multiuse_snapshot_read_isolation_read_timestamp(sessions_database): with sessions_database.snapshot( read_timestamp=committed, multi_use=True ) as read_ts: - before = list(read_ts.read(sd.TABLE, sd.COLUMNS, sd.ALL)) sd._check_row_data(before, all_data_rows) @@ -1452,7 +1448,6 @@ def test_multiuse_snapshot_read_isolation_exact_staleness(sessions_database): delta = datetime.timedelta(microseconds=1000) with sessions_database.snapshot(exact_staleness=delta, multi_use=True) as exact: - before = list(exact.read(sd.TABLE, sd.COLUMNS, sd.ALL)) sd._check_row_data(before, all_data_rows) @@ -1918,6 +1913,19 @@ def test_execute_sql_w_manual_consume(sessions_database): assert streamed._pending_chunk is None +def test_execute_sql_w_to_dict_list(sessions_database): + sd = _sample_data + row_count = 40 + _set_up_table(sessions_database, row_count) + + with sessions_database.snapshot() as snapshot: + rows = snapshot.execute_sql(sd.SQL).to_dict_list() + all_data_rows = list(_row_data(row_count)) + row_data = [list(row.values()) for row in rows] + sd._check_row_data(row_data, all_data_rows) + assert all(set(row.keys()) == set(sd.COLUMNS) for row in rows) + + def _check_sql_results( database, sql, @@ -1945,7 +1953,6 @@ def test_multiuse_snapshot_execute_sql_isolation_strong(sessions_database): all_data_rows = list(_row_data(row_count)) with sessions_database.snapshot(multi_use=True) as strong: - before = list(strong.execute_sql(sd.SQL)) sd._check_row_data(before, 
all_data_rows) @@ -2005,7 +2012,6 @@ def test_invalid_type(sessions_database): def test_execute_sql_select_1(sessions_database): - sessions_database.snapshot(multi_use=True) # Hello, world query @@ -2175,7 +2181,6 @@ def test_execute_sql_w_bytes_bindings(sessions_database, database_dialect): def test_execute_sql_w_timestamp_bindings(sessions_database, database_dialect): - timestamp_1 = datetime_helpers.DatetimeWithNanoseconds( 1989, 1, 17, 17, 59, 12, nanosecond=345612789 ) @@ -2462,7 +2467,6 @@ def test_execute_sql_w_query_param_struct(sessions_database, not_postgres): def test_execute_sql_returning_transfinite_floats(sessions_database, not_postgres): - with sessions_database.snapshot(multi_use=True) as snapshot: # Query returning -inf, +inf, NaN as column values rows = list( @@ -2517,6 +2521,41 @@ def test_partition_query(sessions_database, not_emulator): batch_txn.close() +def test_mutation_groups_insert_or_update_then_query(not_emulator, sessions_database): + sd = _sample_data + num_groups = 3 + num_mutations_per_group = len(sd.BATCH_WRITE_ROW_DATA) // num_groups + + with sessions_database.batch() as batch: + batch.delete(sd.TABLE, sd.ALL) + + with sessions_database.mutation_groups() as groups: + for i in range(num_groups): + group = groups.group() + for j in range(num_mutations_per_group): + group.insert_or_update( + sd.TABLE, + sd.COLUMNS, + [sd.BATCH_WRITE_ROW_DATA[i * num_mutations_per_group + j]], + ) + # Response indexes received + seen = collections.Counter() + for response in groups.batch_write(): + _check_batch_status(response.status.code) + assert response.commit_timestamp is not None + assert len(response.indexes) > 0 + seen.update(response.indexes) + # All indexes must be in the range [0, num_groups-1] and seen exactly once + assert len(seen) == num_groups + assert all((0 <= idx < num_groups and ct == 1) for (idx, ct) in seen.items()) + + # Verify the writes by reading from the database + with sessions_database.snapshot() as snapshot: + rows = list(snapshot.execute_sql(sd.SQL)) + + sd._check_rows_data(rows, sd.BATCH_WRITE_ROW_DATA) + + class FauxCall: def __init__(self, code, details="FauxCall"): self._code = code @@ -2537,7 +2576,6 @@ def details(self): def _check_batch_status(status_code, expected=code_pb2.OK): if status_code != expected: - _status_code_to_grpc_status_code = { member.value[0]: member for member in grpc.StatusCode } diff --git a/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py b/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py index 6f5ec35284..48d300b32a 100644 --- a/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py +++ b/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py @@ -63,7 +63,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import options_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore from google.oauth2 import service_account from google.protobuf import any_pb2 # type: ignore @@ -6627,8 +6627,9 @@ def test_list_databases_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_database_admin.ListDatabasesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = 
spanner_database_admin.ListDatabasesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6710,10 +6711,9 @@ def test_list_databases_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_database_admin.ListDatabasesResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner_database_admin.ListDatabasesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6848,8 +6848,9 @@ def test_list_databases_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_database_admin.ListDatabasesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner_database_admin.ListDatabasesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7253,8 +7254,9 @@ def test_get_database_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_database_admin.Database.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner_database_admin.Database.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7335,8 +7337,9 @@ def test_get_database_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_database_admin.Database.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner_database_admin.Database.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7463,8 +7466,9 @@ def test_get_database_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_database_admin.Database.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner_database_admin.Database.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7555,6 +7559,73 @@ def test_update_database_rest(request_type): "enable_drop_protection": True, "reconciling": True, } + # The version of a generated dependency at test runtime may differ from the version used during generation. 
+ # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = spanner_database_admin.UpdateDatabaseRequest.meta.fields["database"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["database"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["database"][field])): + del request_init["database"][field][i][subfield] + else: + del request_init["database"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. 
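The `get_message_fields` helper added in the hunk above distinguishes proto-plus message classes (which describe themselves via `meta.fields`) from plain protobuf classes (which expose a `DESCRIPTOR`). A minimal standalone sketch of that check follows; it reuses the same request type the test introspects, and only the final print loop is illustrative:

from google.cloud.spanner_admin_database_v1.types import spanner_database_admin


def get_message_fields(field):
    # Scalar fields have no nested schema to walk.
    if not getattr(field, "message", None):
        return []
    # Plain protobuf classes carry a DESCRIPTOR; proto-plus classes do not
    # and instead expose their schema through `meta.fields`.
    if hasattr(field.message, "DESCRIPTOR"):
        return list(field.message.DESCRIPTOR.fields)
    return list(field.message.meta.fields.values())


database_field = spanner_database_admin.UpdateDatabaseRequest.meta.fields["database"]
for nested_field in get_message_fields(database_field):
    print(nested_field.name)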
@@ -7736,43 +7807,6 @@ def test_update_database_rest_bad_request( request_init = { "database": {"name": "projects/sample1/instances/sample2/databases/sample3"} } - request_init["database"] = { - "name": "projects/sample1/instances/sample2/databases/sample3", - "state": 1, - "create_time": {"seconds": 751, "nanos": 543}, - "restore_info": { - "source_type": 1, - "backup_info": { - "backup": "backup_value", - "version_time": {}, - "create_time": {}, - "source_database": "source_database_value", - }, - }, - "encryption_config": {"kms_key_name": "kms_key_name_value"}, - "encryption_info": [ - { - "encryption_type": 1, - "encryption_status": { - "code": 411, - "message": "message_value", - "details": [ - { - "type_url": "type.googleapis.com/google.protobuf.Duration", - "value": b"\x08\x0c\x10\xdb\x07", - } - ], - }, - "kms_key_version": "kms_key_version_value", - } - ], - "version_retention_period": "version_retention_period_value", - "earliest_version_time": {}, - "default_leader": "default_leader_value", - "database_dialect": 1, - "enable_drop_protection": True, - "reconciling": True, - } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -8415,8 +8449,9 @@ def test_get_database_ddl_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_database_admin.GetDatabaseDdlResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner_database_admin.GetDatabaseDdlResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8491,10 +8526,11 @@ def test_get_database_ddl_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_database_admin.GetDatabaseDdlResponse.pb( + # Convert return value to protobuf type + return_value = spanner_database_admin.GetDatabaseDdlResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8623,8 +8659,9 @@ def test_get_database_ddl_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_database_admin.GetDatabaseDdlResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner_database_admin.GetDatabaseDdlResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8690,8 +8727,7 @@ def test_set_iam_policy_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8768,8 +8804,7 @@ def test_set_iam_policy_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - 
json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8900,8 +8935,7 @@ def test_set_iam_policy_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8967,8 +9001,7 @@ def test_get_iam_policy_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9045,8 +9078,7 @@ def test_get_iam_policy_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9169,8 +9201,7 @@ def test_get_iam_policy_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9235,8 +9266,7 @@ def test_test_iam_permissions_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9316,8 +9346,7 @@ def test_test_iam_permissions_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9451,8 +9480,7 @@ def test_test_iam_permissions_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9539,6 +9567,73 @@ def test_create_backup_rest(request_type): ], "max_expire_time": {}, } + # The version of a generated dependency at test runtime may differ from the version used during generation. 
+ # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = gsad_backup.CreateBackupRequest.meta.fields["backup"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["backup"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["backup"][field])): + del request_init["backup"][field][i][subfield] + else: + del request_init["backup"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. 
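The second half of that boilerplate, pruning subfields from the hand-written sample request that the installed proto version does not know about, reduces to a plain dictionary walk. A sketch under simplified assumptions: `runtime_nested_fields` is hard-coded here, whereas the tests derive it from `get_message_fields`, and the unknown subfield name is invented for illustration:

# (field, subfield) pairs the installed proto is assumed to know about.
runtime_nested_fields = {("encryption_info", "encryption_type")}

request_init = {
    "backup": {
        "database": "database_value",
        "encryption_info": {
            "encryption_type": 1,
            "hypothetical_new_subfield": "unknown to the installed proto",
        },
    }
}

for field, value in request_init["backup"].items():
    # Only message-typed values (dicts, or lists of dicts for repeated
    # fields) can carry subfields that need pruning; scalars are left alone.
    items = value if isinstance(value, list) else [value]
    for item in items:
        if isinstance(item, dict):
            for subfield in list(item):
                if (field, subfield) not in runtime_nested_fields:
                    del item[subfield]

# Only the runtime-known subfield survives:
# {'backup': {'database': 'database_value', 'encryption_info': {'encryption_type': 1}}}
print(request_init)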
@@ -9747,39 +9842,6 @@ def test_create_backup_rest_bad_request( # send a request that will satisfy transcoding request_init = {"parent": "projects/sample1/instances/sample2"} - request_init["backup"] = { - "database": "database_value", - "version_time": {"seconds": 751, "nanos": 543}, - "expire_time": {}, - "name": "name_value", - "create_time": {}, - "size_bytes": 1089, - "state": 1, - "referencing_databases": [ - "referencing_databases_value1", - "referencing_databases_value2", - ], - "encryption_info": { - "encryption_type": 1, - "encryption_status": { - "code": 411, - "message": "message_value", - "details": [ - { - "type_url": "type.googleapis.com/google.protobuf.Duration", - "value": b"\x08\x0c\x10\xdb\x07", - } - ], - }, - "kms_key_version": "kms_key_version_value", - }, - "database_dialect": 1, - "referencing_backups": [ - "referencing_backups_value1", - "referencing_backups_value2", - ], - "max_expire_time": {}, - } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -10175,8 +10237,9 @@ def test_get_backup_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = backup.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = backup.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10255,8 +10318,9 @@ def test_get_backup_rest_required_fields(request_type=backup.GetBackupRequest): response_value = Response() response_value.status_code = 200 - pb_return_value = backup.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = backup.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10377,8 +10441,9 @@ def test_get_backup_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = backup.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = backup.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10465,6 +10530,73 @@ def test_update_backup_rest(request_type): ], "max_expire_time": {}, } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = gsad_backup.UpdateBackupRequest.meta.fields["backup"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["backup"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["backup"][field])): + del request_init["backup"][field][i][subfield] + else: + del request_init["backup"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. 
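The recurring "Convert return value to protobuf type" change follows a single pattern: `json_format.MessageToJson` only accepts protobuf messages, so a proto-plus instance is first unwrapped with the `pb()` classmethod of its own type. A small sketch of that conversion using the backup type from this package; the field values are arbitrary:

from google.protobuf import json_format

from google.cloud.spanner_admin_database_v1.types import backup

return_value = backup.Backup(
    name="projects/sample1/instances/sample2/backups/sample3",
    size_bytes=1089,
)

# proto-plus wrapper -> underlying protobuf message
pb_message = backup.Backup.pb(return_value)
json_return_value = json_format.MessageToJson(pb_message)
print(json_return_value)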
@@ -10483,8 +10615,9 @@ def test_update_backup_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = gsad_backup.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = gsad_backup.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10563,8 +10696,9 @@ def test_update_backup_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = gsad_backup.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = gsad_backup.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10660,39 +10794,6 @@ def test_update_backup_rest_bad_request( request_init = { "backup": {"name": "projects/sample1/instances/sample2/backups/sample3"} } - request_init["backup"] = { - "database": "database_value", - "version_time": {"seconds": 751, "nanos": 543}, - "expire_time": {}, - "name": "projects/sample1/instances/sample2/backups/sample3", - "create_time": {}, - "size_bytes": 1089, - "state": 1, - "referencing_databases": [ - "referencing_databases_value1", - "referencing_databases_value2", - ], - "encryption_info": { - "encryption_type": 1, - "encryption_status": { - "code": 411, - "message": "message_value", - "details": [ - { - "type_url": "type.googleapis.com/google.protobuf.Duration", - "value": b"\x08\x0c\x10\xdb\x07", - } - ], - }, - "kms_key_version": "kms_key_version_value", - }, - "database_dialect": 1, - "referencing_backups": [ - "referencing_backups_value1", - "referencing_backups_value2", - ], - "max_expire_time": {}, - } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
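The bad-request variants fake a 400 response and expect the client to raise. The mapping from a `requests.Response` to the raised exception type lives in `google.api_core`; the sketch below exercises just that mapping, with an illustrative request and error payload and no client involved:

from requests import Request, Response

from google.api_core import exceptions as core_exceptions

response_value = Response()
response_value.status_code = 400
# from_http_response() reads the request when building the error message.
response_value.request = Request(method="PATCH", url="https://example.invalid")
response_value._content = (
    b'{"error": {"message": "invalid backup", "status": "INVALID_ARGUMENT"}}'
)

exc = core_exceptions.from_http_response(response_value)
assert isinstance(exc, core_exceptions.BadRequest)
print(exc)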
@@ -10733,8 +10834,9 @@ def test_update_backup_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = gsad_backup.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = gsad_backup.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11048,8 +11150,9 @@ def test_list_backups_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = backup.ListBackupsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = backup.ListBackupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11130,8 +11233,9 @@ def test_list_backups_rest_required_fields(request_type=backup.ListBackupsReques response_value = Response() response_value.status_code = 200 - pb_return_value = backup.ListBackupsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = backup.ListBackupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11263,8 +11367,9 @@ def test_list_backups_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = backup.ListBackupsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = backup.ListBackupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11662,10 +11767,11 @@ def test_list_database_operations_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_database_admin.ListDatabaseOperationsResponse.pb( + # Convert return value to protobuf type + return_value = spanner_database_admin.ListDatabaseOperationsResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11748,10 +11854,11 @@ def test_list_database_operations_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_database_admin.ListDatabaseOperationsResponse.pb( + # Convert return value to protobuf type + return_value = spanner_database_admin.ListDatabaseOperationsResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11888,10 +11995,11 @@ def test_list_database_operations_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() 
response_value.status_code = 200 - pb_return_value = spanner_database_admin.ListDatabaseOperationsResponse.pb( + # Convert return value to protobuf type + return_value = spanner_database_admin.ListDatabaseOperationsResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -12014,8 +12122,9 @@ def test_list_backup_operations_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = backup.ListBackupOperationsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = backup.ListBackupOperationsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -12098,8 +12207,9 @@ def test_list_backup_operations_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = backup.ListBackupOperationsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = backup.ListBackupOperationsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -12233,8 +12343,9 @@ def test_list_backup_operations_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = backup.ListBackupOperationsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = backup.ListBackupOperationsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -12356,10 +12467,9 @@ def test_list_database_roles_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_database_admin.ListDatabaseRolesResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner_database_admin.ListDatabaseRolesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -12441,10 +12551,11 @@ def test_list_database_roles_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_database_admin.ListDatabaseRolesResponse.pb( + # Convert return value to protobuf type + return_value = spanner_database_admin.ListDatabaseRolesResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -12582,10 +12693,9 @@ def test_list_database_roles_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = 
spanner_database_admin.ListDatabaseRolesResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner_database_admin.ListDatabaseRolesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -13942,7 +14052,7 @@ def test_delete_operation(transport: str = "grpc"): @pytest.mark.asyncio -async def test_delete_operation_async(transport: str = "grpc"): +async def test_delete_operation_async(transport: str = "grpc_asyncio"): client = DatabaseAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -14081,7 +14191,7 @@ def test_cancel_operation(transport: str = "grpc"): @pytest.mark.asyncio -async def test_cancel_operation_async(transport: str = "grpc"): +async def test_cancel_operation_async(transport: str = "grpc_asyncio"): client = DatabaseAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -14220,7 +14330,7 @@ def test_get_operation(transport: str = "grpc"): @pytest.mark.asyncio -async def test_get_operation_async(transport: str = "grpc"): +async def test_get_operation_async(transport: str = "grpc_asyncio"): client = DatabaseAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -14365,7 +14475,7 @@ def test_list_operations(transport: str = "grpc"): @pytest.mark.asyncio -async def test_list_operations_async(transport: str = "grpc"): +async def test_list_operations_async(transport: str = "grpc_asyncio"): client = DatabaseAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, diff --git a/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py b/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py index 29c6a1621e..ac621afc00 100644 --- a/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py +++ b/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py @@ -60,7 +60,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import options_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore from google.oauth2 import service_account from google.protobuf import field_mask_pb2 # type: ignore @@ -4838,10 +4838,11 @@ def test_list_instance_configs_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_instance_admin.ListInstanceConfigsResponse.pb( + # Convert return value to protobuf type + return_value = spanner_instance_admin.ListInstanceConfigsResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4923,10 +4924,11 @@ def test_list_instance_configs_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_instance_admin.ListInstanceConfigsResponse.pb( + # Convert return value to protobuf type + return_value = spanner_instance_admin.ListInstanceConfigsResponse.pb( return_value ) - json_return_value = 
json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -5062,10 +5064,11 @@ def test_list_instance_configs_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_instance_admin.ListInstanceConfigsResponse.pb( + # Convert return value to protobuf type + return_value = spanner_instance_admin.ListInstanceConfigsResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -5196,8 +5199,9 @@ def test_get_instance_config_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_instance_admin.InstanceConfig.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner_instance_admin.InstanceConfig.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -5282,8 +5286,9 @@ def test_get_instance_config_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_instance_admin.InstanceConfig.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner_instance_admin.InstanceConfig.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -5409,8 +5414,9 @@ def test_get_instance_config_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_instance_admin.InstanceConfig.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner_instance_admin.InstanceConfig.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6299,10 +6305,11 @@ def test_list_instance_config_operations_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = ( - spanner_instance_admin.ListInstanceConfigOperationsResponse.pb(return_value) + # Convert return value to protobuf type + return_value = spanner_instance_admin.ListInstanceConfigOperationsResponse.pb( + return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6385,12 +6392,13 @@ def test_list_instance_config_operations_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = ( + # Convert return value to protobuf type + return_value = ( spanner_instance_admin.ListInstanceConfigOperationsResponse.pb( return_value ) ) - json_return_value = json_format.MessageToJson(pb_return_value) + 
json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6531,10 +6539,11 @@ def test_list_instance_config_operations_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = ( - spanner_instance_admin.ListInstanceConfigOperationsResponse.pb(return_value) + # Convert return value to protobuf type + return_value = spanner_instance_admin.ListInstanceConfigOperationsResponse.pb( + return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6659,8 +6668,9 @@ def test_list_instances_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_instance_admin.ListInstancesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner_instance_admin.ListInstancesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6743,10 +6753,9 @@ def test_list_instances_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_instance_admin.ListInstancesResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner_instance_admin.ListInstancesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6882,8 +6891,9 @@ def test_list_instances_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_instance_admin.ListInstancesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner_instance_admin.ListInstancesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7009,8 +7019,9 @@ def test_get_instance_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_instance_admin.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner_instance_admin.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7093,8 +7104,9 @@ def test_get_instance_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_instance_admin.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner_instance_admin.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) 
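The hunks above and below apply one mechanical rename from the regenerated GAPIC tests: the proto-plus return value is converted in place to its raw protobuf type (reusing the name return_value) before being serialized with json_format.MessageToJson onto a fake requests.Response, rather than going through a separate pb_return_value variable. A minimal sketch of that mocking pattern, using a plain protobuf Struct so it runs without the Spanner client installed (the message contents and URL are illustrative only, not taken from the change):

from unittest import mock

from google.protobuf import json_format, struct_pb2
from requests import Response, Session

# Message the mocked API should appear to return.
return_value = struct_pb2.Struct()
return_value.fields["answer"].number_value = 42

# Serialize the protobuf message and attach the JSON payload to a fake
# HTTP response, the same way the regenerated tests do.
json_return_value = json_format.MessageToJson(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")

# While Session.request is patched, any REST call sees the canned response.
with mock.patch.object(Session, "request", return_value=response_value):
    resp = Session().request("GET", "https://example.invalid")  # illustrative URL
    assert resp.status_code == 200 and "answer" in resp.json()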
response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7219,8 +7231,9 @@ def test_get_instance_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner_instance_admin.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner_instance_admin.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8082,8 +8095,7 @@ def test_set_iam_policy_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8160,8 +8172,7 @@ def test_set_iam_policy_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8290,8 +8301,7 @@ def test_set_iam_policy_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8357,8 +8367,7 @@ def test_get_iam_policy_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8435,8 +8444,7 @@ def test_get_iam_policy_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8557,8 +8565,7 @@ def test_get_iam_policy_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8623,8 +8630,7 @@ def test_test_iam_permissions_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ 
-8704,8 +8710,7 @@ def test_test_iam_permissions_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8837,8 +8842,7 @@ def test_test_iam_permissions_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value diff --git a/tests/unit/gapic/spanner_v1/test_spanner.py b/tests/unit/gapic/spanner_v1/test_spanner.py index 8bf8407724..d136ba902c 100644 --- a/tests/unit/gapic/spanner_v1/test_spanner.py +++ b/tests/unit/gapic/spanner_v1/test_spanner.py @@ -3857,6 +3857,292 @@ async def test_partition_read_field_headers_async(): ) in kw["metadata"] +@pytest.mark.parametrize( + "request_type", + [ + spanner.BatchWriteRequest, + dict, + ], +) +def test_batch_write(request_type, transport: str = "grpc"): + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.batch_write), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([spanner.BatchWriteResponse()]) + response = client.batch_write(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == spanner.BatchWriteRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, spanner.BatchWriteResponse) + + +def test_batch_write_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.batch_write), "__call__") as call: + client.batch_write() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == spanner.BatchWriteRequest() + + +@pytest.mark.asyncio +async def test_batch_write_async( + transport: str = "grpc_asyncio", request_type=spanner.BatchWriteRequest +): + client = SpannerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.batch_write), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[spanner.BatchWriteResponse()] + ) + response = await client.batch_write(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == spanner.BatchWriteRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, spanner.BatchWriteResponse) + + +@pytest.mark.asyncio +async def test_batch_write_async_from_dict(): + await test_batch_write_async(request_type=dict) + + +def test_batch_write_field_headers(): + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.BatchWriteRequest() + + request.session = "session_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.batch_write), "__call__") as call: + call.return_value = iter([spanner.BatchWriteResponse()]) + client.batch_write(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "session=session_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_batch_write_field_headers_async(): + client = SpannerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.BatchWriteRequest() + + request.session = "session_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.batch_write), "__call__") as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[spanner.BatchWriteResponse()] + ) + await client.batch_write(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "session=session_value", + ) in kw["metadata"] + + +def test_batch_write_flattened(): + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.batch_write), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([spanner.BatchWriteResponse()]) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_write( + session="session_value", + mutation_groups=[ + spanner.BatchWriteRequest.MutationGroup( + mutations=[ + mutation.Mutation( + insert=mutation.Mutation.Write(table="table_value") + ) + ] + ) + ], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].session + mock_val = "session_value" + assert arg == mock_val + arg = args[0].mutation_groups + mock_val = [ + spanner.BatchWriteRequest.MutationGroup( + mutations=[ + mutation.Mutation( + insert=mutation.Mutation.Write(table="table_value") + ) + ] + ) + ] + assert arg == mock_val + + +def test_batch_write_flattened_error(): + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_write( + spanner.BatchWriteRequest(), + session="session_value", + mutation_groups=[ + spanner.BatchWriteRequest.MutationGroup( + mutations=[ + mutation.Mutation( + insert=mutation.Mutation.Write(table="table_value") + ) + ] + ) + ], + ) + + +@pytest.mark.asyncio +async def test_batch_write_flattened_async(): + client = SpannerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.batch_write), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([spanner.BatchWriteResponse()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_write( + session="session_value", + mutation_groups=[ + spanner.BatchWriteRequest.MutationGroup( + mutations=[ + mutation.Mutation( + insert=mutation.Mutation.Write(table="table_value") + ) + ] + ) + ], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].session + mock_val = "session_value" + assert arg == mock_val + arg = args[0].mutation_groups + mock_val = [ + spanner.BatchWriteRequest.MutationGroup( + mutations=[ + mutation.Mutation( + insert=mutation.Mutation.Write(table="table_value") + ) + ] + ) + ] + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_batch_write_flattened_error_async(): + client = SpannerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.batch_write( + spanner.BatchWriteRequest(), + session="session_value", + mutation_groups=[ + spanner.BatchWriteRequest.MutationGroup( + mutations=[ + mutation.Mutation( + insert=mutation.Mutation.Write(table="table_value") + ) + ] + ) + ], + ) + + @pytest.mark.parametrize( "request_type", [ @@ -3885,8 +4171,9 @@ def test_create_session_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner.Session.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner.Session.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -3961,8 +4248,9 @@ def test_create_session_rest_required_fields(request_type=spanner.CreateSessionR response_value = Response() response_value.status_code = 200 - pb_return_value = spanner.Session.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner.Session.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4091,8 +4379,9 @@ def test_create_session_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner.Session.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner.Session.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4155,8 +4444,9 @@ def test_batch_create_sessions_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner.BatchCreateSessionsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner.BatchCreateSessionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4235,8 +4525,9 @@ def test_batch_create_sessions_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = spanner.BatchCreateSessionsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner.BatchCreateSessionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4370,8 +4661,9 @@ def test_batch_create_sessions_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner.BatchCreateSessionsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner.BatchCreateSessionsResponse.pb(return_value) + json_return_value = 
json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4440,8 +4732,9 @@ def test_get_session_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner.Session.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner.Session.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4515,8 +4808,9 @@ def test_get_session_rest_required_fields(request_type=spanner.GetSessionRequest response_value = Response() response_value.status_code = 200 - pb_return_value = spanner.Session.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner.Session.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4639,8 +4933,9 @@ def test_get_session_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner.Session.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner.Session.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4705,8 +5000,9 @@ def test_list_sessions_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner.ListSessionsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner.ListSessionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4787,8 +5083,9 @@ def test_list_sessions_rest_required_fields(request_type=spanner.ListSessionsReq response_value = Response() response_value.status_code = 200 - pb_return_value = spanner.ListSessionsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner.ListSessionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4920,8 +5217,9 @@ def test_list_sessions_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner.ListSessionsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner.ListSessionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -5296,8 +5594,9 @@ def test_execute_sql_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() 
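The batch_write tests added in this change cover Spanner's BatchWrite RPC, which streams one BatchWriteResponse per batch of applied mutation groups. A hypothetical usage sketch of the flattened call those tests mock out; it assumes a google-cloud-spanner build that already exposes batch_write, application-default credentials, and purely placeholder resource names:

from google.cloud import spanner_v1

client = spanner_v1.SpannerClient()  # assumes application-default credentials

# Placeholder session name; a real caller creates or pools a session first.
session = "projects/my-project/instances/my-instance/databases/my-db/sessions/my-session"

mutation_groups = [
    spanner_v1.BatchWriteRequest.MutationGroup(
        mutations=[
            # A real mutation also carries columns and row values; only the
            # table is shown here, mirroring the unit tests in this change.
            spanner_v1.Mutation(insert=spanner_v1.Mutation.Write(table="Singers"))
        ]
    )
]

# batch_write is server-streaming: iterate the responses as they arrive.
for response in client.batch_write(session=session, mutation_groups=mutation_groups):
    print(response.indexes, response.status)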
response_value.status_code = 200 - pb_return_value = result_set.ResultSet.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = result_set.ResultSet.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -5374,8 +5673,9 @@ def test_execute_sql_rest_required_fields(request_type=spanner.ExecuteSqlRequest response_value = Response() response_value.status_code = 200 - pb_return_value = result_set.ResultSet.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = result_set.ResultSet.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -5517,8 +5817,9 @@ def test_execute_streaming_sql_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = result_set.PartialResultSet.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = result_set.PartialResultSet.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) @@ -5606,8 +5907,9 @@ def test_execute_streaming_sql_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = result_set.PartialResultSet.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = result_set.PartialResultSet.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") @@ -5752,8 +6054,9 @@ def test_execute_batch_dml_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner.ExecuteBatchDmlResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner.ExecuteBatchDmlResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -5832,8 +6135,9 @@ def test_execute_batch_dml_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = spanner.ExecuteBatchDmlResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner.ExecuteBatchDmlResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -5976,8 +6280,9 @@ def test_read_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = result_set.ResultSet.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = result_set.ResultSet.pb(return_value) + json_return_value = 
json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6058,8 +6363,9 @@ def test_read_rest_required_fields(request_type=spanner.ReadRequest): response_value = Response() response_value.status_code = 200 - pb_return_value = result_set.ResultSet.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = result_set.ResultSet.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6203,8 +6509,9 @@ def test_streaming_read_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = result_set.PartialResultSet.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = result_set.PartialResultSet.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) @@ -6294,8 +6601,9 @@ def test_streaming_read_rest_required_fields(request_type=spanner.ReadRequest): response_value = Response() response_value.status_code = 200 - pb_return_value = result_set.PartialResultSet.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = result_set.PartialResultSet.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") @@ -6444,8 +6752,9 @@ def test_begin_transaction_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = transaction.Transaction.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = transaction.Transaction.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6521,8 +6830,9 @@ def test_begin_transaction_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = transaction.Transaction.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = transaction.Transaction.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6662,8 +6972,9 @@ def test_begin_transaction_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = transaction.Transaction.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = transaction.Transaction.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6733,8 +7044,9 @@ def test_commit_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - 
pb_return_value = commit_response.CommitResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = commit_response.CommitResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6807,8 +7119,9 @@ def test_commit_rest_required_fields(request_type=spanner.CommitRequest): response_value = Response() response_value.status_code = 200 - pb_return_value = commit_response.CommitResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = commit_response.CommitResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6936,8 +7249,9 @@ def test_commit_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = commit_response.CommitResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = commit_response.CommitResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7279,8 +7593,9 @@ def test_partition_query_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner.PartitionResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner.PartitionResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7359,8 +7674,9 @@ def test_partition_query_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = spanner.PartitionResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner.PartitionResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7501,8 +7817,9 @@ def test_partition_read_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = spanner.PartitionResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner.PartitionResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7579,8 +7896,9 @@ def test_partition_read_rest_required_fields(request_type=spanner.PartitionReadR response_value = Response() response_value.status_code = 200 - pb_return_value = spanner.PartitionResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = spanner.PartitionResponse.pb(return_value) + json_return_value = 
json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7695,6 +8013,318 @@ def test_partition_read_rest_error(): ) +@pytest.mark.parametrize( + "request_type", + [ + spanner.BatchWriteRequest, + dict, + ], +) +def test_batch_write_rest(request_type): + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "session": "projects/sample1/instances/sample2/databases/sample3/sessions/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = spanner.BatchWriteResponse( + indexes=[752], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = spanner.BatchWriteResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.batch_write(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. + assert isinstance(response, spanner.BatchWriteResponse) + assert response.indexes == [752] + + +def test_batch_write_rest_required_fields(request_type=spanner.BatchWriteRequest): + transport_class = transports.SpannerRestTransport + + request_init = {} + request_init["session"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).batch_write._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["session"] = "session_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).batch_write._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "session" in jsonified_request + assert jsonified_request["session"] == "session_value" + + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = spanner.BatchWriteResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
+ with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = spanner.BatchWriteResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.batch_write(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_batch_write_rest_unset_required_fields(): + transport = transports.SpannerRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.batch_write._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "session", + "mutationGroups", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_batch_write_rest_interceptors(null_interceptor): + transport = transports.SpannerRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.SpannerRestInterceptor(), + ) + client = SpannerClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.SpannerRestInterceptor, "post_batch_write" + ) as post, mock.patch.object( + transports.SpannerRestInterceptor, "pre_batch_write" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = spanner.BatchWriteRequest.pb(spanner.BatchWriteRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = spanner.BatchWriteResponse.to_json( + spanner.BatchWriteResponse() + ) + req.return_value._content = "[{}]".format(req.return_value._content) + + request = spanner.BatchWriteRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = spanner.BatchWriteResponse() + + client.batch_write( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_batch_write_rest_bad_request( + transport: str = "rest", request_type=spanner.BatchWriteRequest +): + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "session": "projects/sample1/instances/sample2/databases/sample3/sessions/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.batch_write(request) + + +def test_batch_write_rest_flattened(): + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = spanner.BatchWriteResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "session": "projects/sample1/instances/sample2/databases/sample3/sessions/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + session="session_value", + mutation_groups=[ + spanner.BatchWriteRequest.MutationGroup( + mutations=[ + mutation.Mutation( + insert=mutation.Mutation.Write(table="table_value") + ) + ] + ) + ], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = spanner.BatchWriteResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + client.batch_write(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{session=projects/*/instances/*/databases/*/sessions/*}:batchWrite" + % client.transport._host, + args[1], + ) + + +def test_batch_write_rest_flattened_error(transport: str = "rest"): + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_write( + spanner.BatchWriteRequest(), + session="session_value", + mutation_groups=[ + spanner.BatchWriteRequest.MutationGroup( + mutations=[ + mutation.Mutation( + insert=mutation.Mutation.Write(table="table_value") + ) + ] + ) + ], + ) + + +def test_batch_write_rest_error(): + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. 
transport = transports.SpannerGrpcTransport( @@ -7849,6 +8479,7 @@ def test_spanner_base_transport(): "rollback", "partition_query", "partition_read", + "batch_write", ) for method in methods: with pytest.raises(NotImplementedError): @@ -8161,6 +8792,9 @@ def test_spanner_client_transport_session_collision(transport_name): session1 = client1.transport.partition_read._session session2 = client2.transport.partition_read._session assert session1 != session2 + session1 = client1.transport.batch_write._session + session2 = client2.transport.batch_write._session + assert session1 != session2 def test_spanner_grpc_transport_channel(): diff --git a/tests/unit/spanner_dbapi/test_batch_dml_executor.py b/tests/unit/spanner_dbapi/test_batch_dml_executor.py new file mode 100644 index 0000000000..3dc387bcb6 --- /dev/null +++ b/tests/unit/spanner_dbapi/test_batch_dml_executor.py @@ -0,0 +1,54 @@ +# Copyright 2023 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +from unittest import mock + +from google.cloud.spanner_dbapi import ProgrammingError +from google.cloud.spanner_dbapi.batch_dml_executor import BatchDmlExecutor +from google.cloud.spanner_dbapi.parsed_statement import ( + ParsedStatement, + Statement, + StatementType, +) + + +class TestBatchDmlExecutor(unittest.TestCase): + @mock.patch("google.cloud.spanner_dbapi.cursor.Cursor") + def setUp(self, mock_cursor): + self._under_test = BatchDmlExecutor(mock_cursor) + + def test_execute_statement_non_dml_statement_type(self): + parsed_statement = ParsedStatement(StatementType.QUERY, Statement("sql")) + + with self.assertRaises(ProgrammingError): + self._under_test.execute_statement(parsed_statement) + + def test_execute_statement_insert_statement_type(self): + statement = Statement("sql") + + self._under_test.execute_statement( + ParsedStatement(StatementType.INSERT, statement) + ) + + self.assertEqual(self._under_test._statements, [statement]) + + def test_execute_statement_update_statement_type(self): + statement = Statement("sql") + + self._under_test.execute_statement( + ParsedStatement(StatementType.UPDATE, statement) + ) + + self.assertEqual(self._under_test._statements, [statement]) diff --git a/tests/unit/spanner_dbapi/test_connection.py b/tests/unit/spanner_dbapi/test_connection.py index 1628f84062..8996a06ce6 100644 --- a/tests/unit/spanner_dbapi/test_connection.py +++ b/tests/unit/spanner_dbapi/test_connection.py @@ -20,6 +20,20 @@ import warnings import pytest +from google.cloud.spanner_dbapi.batch_dml_executor import BatchMode +from google.cloud.spanner_dbapi.exceptions import ( + InterfaceError, + OperationalError, + ProgrammingError, +) +from google.cloud.spanner_dbapi import Connection +from google.cloud.spanner_dbapi.connection import CLIENT_TRANSACTION_NOT_STARTED_WARNING +from google.cloud.spanner_dbapi.parsed_statement import ( + ParsedStatement, + StatementType, + Statement, +) + PROJECT = "test-project" INSTANCE = "test-instance" DATABASE = "test-database" @@ -36,18 +50,21 @@ class 
_CredentialsWithScopes(credentials.Credentials, credentials.Scoped): class TestConnection(unittest.TestCase): + def setUp(self): + self._under_test = self._make_connection() + def _get_client_info(self): from google.api_core.gapic_v1.client_info import ClientInfo return ClientInfo(user_agent=USER_AGENT) def _make_connection(self, **kwargs): - from google.cloud.spanner_dbapi import Connection from google.cloud.spanner_v1.instance import Instance from google.cloud.spanner_v1.client import Client # We don't need a real Client object to test the constructor - instance = Instance(INSTANCE, client=Client) + client = Client() + instance = Instance(INSTANCE, client=client) database = instance.database(DATABASE) return Connection(instance, database, **kwargs) @@ -67,33 +84,13 @@ def test_autocommit_setter_transaction_not_started(self, mock_commit): @mock.patch("google.cloud.spanner_dbapi.connection.Connection.commit") def test_autocommit_setter_transaction_started(self, mock_commit): connection = self._make_connection() - connection._transaction = mock.Mock(committed=False, rolled_back=False) + connection._spanner_transaction_started = True connection.autocommit = True mock_commit.assert_called_once() self.assertTrue(connection._autocommit) - @mock.patch("google.cloud.spanner_dbapi.connection.Connection.commit") - def test_autocommit_setter_transaction_started_commited_rolled_back( - self, mock_commit - ): - connection = self._make_connection() - - connection._transaction = mock.Mock(committed=True, rolled_back=False) - - connection.autocommit = True - mock_commit.assert_not_called() - self.assertTrue(connection._autocommit) - - connection.autocommit = False - - connection._transaction = mock.Mock(committed=False, rolled_back=True) - - connection.autocommit = True - mock_commit.assert_not_called() - self.assertTrue(connection._autocommit) - def test_property_database(self): from google.cloud.spanner_v1.database import Database @@ -112,7 +109,7 @@ def test_read_only_connection(self): connection = self._make_connection(read_only=True) self.assertTrue(connection.read_only) - connection._transaction = mock.Mock(committed=False, rolled_back=False) + connection._spanner_transaction_started = True with self.assertRaisesRegex( ValueError, "Connection read/write mode can't be changed while a transaction is in progress. 
" @@ -120,7 +117,7 @@ def test_read_only_connection(self): ): connection.read_only = False - connection._transaction = None + connection._spanner_transaction_started = False connection.read_only = False self.assertFalse(connection.read_only) @@ -156,8 +153,6 @@ def _make_pool(): @mock.patch("google.cloud.spanner_v1.database.Database") def test__session_checkout(self, mock_database): - from google.cloud.spanner_dbapi import Connection - pool = self._make_pool() mock_database._pool = pool connection = Connection(INSTANCE, mock_database) @@ -171,8 +166,6 @@ def test__session_checkout(self, mock_database): self.assertEqual(connection._session, "db_session") def test_session_checkout_database_error(self): - from google.cloud.spanner_dbapi import Connection - connection = Connection(INSTANCE) with pytest.raises(ValueError): @@ -180,8 +173,6 @@ def test_session_checkout_database_error(self): @mock.patch("google.cloud.spanner_v1.database.Database") def test__release_session(self, mock_database): - from google.cloud.spanner_dbapi import Connection - pool = self._make_pool() mock_database._pool = pool connection = Connection(INSTANCE, mock_database) @@ -192,15 +183,11 @@ def test__release_session(self, mock_database): self.assertIsNone(connection._session) def test_release_session_database_error(self): - from google.cloud.spanner_dbapi import Connection - connection = Connection(INSTANCE) with pytest.raises(ValueError): connection._release_session() def test_transaction_checkout(self): - from google.cloud.spanner_dbapi import Connection - connection = Connection(INSTANCE, DATABASE) mock_checkout = mock.MagicMock(autospec=True) connection._session_checkout = mock_checkout @@ -210,8 +197,8 @@ def test_transaction_checkout(self): mock_checkout.assert_called_once_with() mock_transaction = mock.MagicMock() - mock_transaction.committed = mock_transaction.rolled_back = False connection._transaction = mock_transaction + connection._spanner_transaction_started = True self.assertEqual(connection.transaction_checkout(), mock_transaction) @@ -219,13 +206,13 @@ def test_transaction_checkout(self): self.assertIsNone(connection.transaction_checkout()) def test_snapshot_checkout(self): - from google.cloud.spanner_dbapi import Connection - connection = Connection(INSTANCE, DATABASE, read_only=True) connection.autocommit = False session_checkout = mock.MagicMock(autospec=True) connection._session_checkout = session_checkout + release_session = mock.MagicMock() + connection._release_session = release_session snapshot = connection.snapshot_checkout() session_checkout.assert_called_once() @@ -233,19 +220,20 @@ def test_snapshot_checkout(self): self.assertEqual(snapshot, connection.snapshot_checkout()) connection.commit() - self.assertIsNone(connection._snapshot) + self.assertIsNotNone(connection._snapshot) + release_session.assert_called_once() connection.snapshot_checkout() self.assertIsNotNone(connection._snapshot) connection.rollback() - self.assertIsNone(connection._snapshot) + self.assertIsNotNone(connection._snapshot) + self.assertEqual(release_session.call_count, 2) connection.autocommit = True self.assertIsNone(connection.snapshot_checkout()) - @mock.patch("google.cloud.spanner_v1.Client") - def test_close(self, mock_client): + def test_close(self): from google.cloud.spanner_dbapi import connect from google.cloud.spanner_dbapi import InterfaceError @@ -261,8 +249,8 @@ def test_close(self, mock_client): connection.cursor() mock_transaction = mock.MagicMock() - mock_transaction.committed = 
mock_transaction.rolled_back = False connection._transaction = mock_transaction + connection._spanner_transaction_started = True mock_rollback = mock.MagicMock() mock_transaction.rollback = mock_rollback @@ -278,36 +266,37 @@ def test_close(self, mock_client): self.assertTrue(connection.is_closed) @mock.patch.object(warnings, "warn") - def test_commit(self, mock_warn): - from google.cloud.spanner_dbapi import Connection - from google.cloud.spanner_dbapi.connection import AUTOCOMMIT_MODE_WARNING - - connection = Connection(INSTANCE, DATABASE) + def test_commit_with_spanner_transaction_not_started(self, mock_warn): + self._under_test._spanner_transaction_started = False with mock.patch( "google.cloud.spanner_dbapi.connection.Connection._release_session" ) as mock_release: - connection.commit() + self._under_test.commit() - mock_release.assert_not_called() + mock_release.assert_called() - connection._transaction = mock_transaction = mock.MagicMock( - rolled_back=False, committed=False - ) + def test_commit(self): + self._under_test._transaction = mock_transaction = mock.MagicMock() + self._under_test._spanner_transaction_started = True mock_transaction.commit = mock_commit = mock.MagicMock() with mock.patch( "google.cloud.spanner_dbapi.connection.Connection._release_session" ) as mock_release: - connection.commit() + self._under_test.commit() mock_commit.assert_called_once_with() mock_release.assert_called_once_with() - connection._autocommit = True - connection.commit() + @mock.patch.object(warnings, "warn") + def test_commit_in_autocommit_mode(self, mock_warn): + self._under_test._autocommit = True + + self._under_test.commit() + mock_warn.assert_called_once_with( - AUTOCOMMIT_MODE_WARNING, UserWarning, stacklevel=2 + CLIENT_TRANSACTION_NOT_STARTED_WARNING, UserWarning, stacklevel=2 ) def test_commit_database_error(self): @@ -319,38 +308,130 @@ def test_commit_database_error(self): connection.commit() @mock.patch.object(warnings, "warn") - def test_rollback(self, mock_warn): - from google.cloud.spanner_dbapi import Connection - from google.cloud.spanner_dbapi.connection import AUTOCOMMIT_MODE_WARNING - - connection = Connection(INSTANCE, DATABASE) + def test_rollback_spanner_transaction_not_started(self, mock_warn): + self._under_test._spanner_transaction_started = False with mock.patch( "google.cloud.spanner_dbapi.connection.Connection._release_session" ) as mock_release: - connection.rollback() + self._under_test.rollback() - mock_release.assert_not_called() + mock_release.assert_called() + @mock.patch.object(warnings, "warn") + def test_rollback(self, mock_warn): mock_transaction = mock.MagicMock() - connection._transaction = mock_transaction + self._under_test._spanner_transaction_started = True + self._under_test._transaction = mock_transaction mock_rollback = mock.MagicMock() mock_transaction.rollback = mock_rollback with mock.patch( "google.cloud.spanner_dbapi.connection.Connection._release_session" ) as mock_release: - connection.rollback() + self._under_test.rollback() mock_rollback.assert_called_once_with() mock_release.assert_called_once_with() - connection._autocommit = True - connection.rollback() + @mock.patch.object(warnings, "warn") + def test_rollback_in_autocommit_mode(self, mock_warn): + self._under_test._autocommit = True + + self._under_test.rollback() + mock_warn.assert_called_once_with( - AUTOCOMMIT_MODE_WARNING, UserWarning, stacklevel=2 + CLIENT_TRANSACTION_NOT_STARTED_WARNING, UserWarning, stacklevel=2 + ) + + def test_start_batch_dml_batch_mode_active(self): + 
self._under_test._batch_mode = BatchMode.DML + cursor = self._under_test.cursor() + + with self.assertRaises(ProgrammingError): + self._under_test.start_batch_dml(cursor) + + def test_start_batch_dml_connection_read_only(self): + self._under_test.read_only = True + cursor = self._under_test.cursor() + + with self.assertRaises(ProgrammingError): + self._under_test.start_batch_dml(cursor) + + def test_start_batch_dml(self): + cursor = self._under_test.cursor() + + self._under_test.start_batch_dml(cursor) + + self.assertEqual(self._under_test._batch_mode, BatchMode.DML) + + def test_execute_batch_dml_batch_mode_inactive(self): + self._under_test._batch_mode = BatchMode.NONE + + with self.assertRaises(ProgrammingError): + self._under_test.execute_batch_dml_statement( + ParsedStatement(StatementType.UPDATE, Statement("sql")) + ) + + @mock.patch( + "google.cloud.spanner_dbapi.batch_dml_executor.BatchDmlExecutor", autospec=True + ) + def test_execute_batch_dml(self, mock_batch_dml_executor): + self._under_test._batch_mode = BatchMode.DML + self._under_test._batch_dml_executor = mock_batch_dml_executor + + parsed_statement = ParsedStatement(StatementType.UPDATE, Statement("sql")) + self._under_test.execute_batch_dml_statement(parsed_statement) + + mock_batch_dml_executor.execute_statement.assert_called_once_with( + parsed_statement ) + @mock.patch( + "google.cloud.spanner_dbapi.batch_dml_executor.BatchDmlExecutor", autospec=True + ) + def test_run_batch_batch_mode_inactive(self, mock_batch_dml_executor): + self._under_test._batch_mode = BatchMode.NONE + self._under_test._batch_dml_executor = mock_batch_dml_executor + + with self.assertRaises(ProgrammingError): + self._under_test.run_batch() + + @mock.patch( + "google.cloud.spanner_dbapi.batch_dml_executor.BatchDmlExecutor", autospec=True + ) + def test_run_batch(self, mock_batch_dml_executor): + self._under_test._batch_mode = BatchMode.DML + self._under_test._batch_dml_executor = mock_batch_dml_executor + + self._under_test.run_batch() + + mock_batch_dml_executor.run_batch_dml.assert_called_once_with() + self.assertEqual(self._under_test._batch_mode, BatchMode.NONE) + self.assertEqual(self._under_test._batch_dml_executor, None) + + @mock.patch( + "google.cloud.spanner_dbapi.batch_dml_executor.BatchDmlExecutor", autospec=True + ) + def test_abort_batch_batch_mode_inactive(self, mock_batch_dml_executor): + self._under_test._batch_mode = BatchMode.NONE + self._under_test._batch_dml_executor = mock_batch_dml_executor + + with self.assertRaises(ProgrammingError): + self._under_test.abort_batch() + + @mock.patch( + "google.cloud.spanner_dbapi.batch_dml_executor.BatchDmlExecutor", autospec=True + ) + def test_abort_dml_batch(self, mock_batch_dml_executor): + self._under_test._batch_mode = BatchMode.DML + self._under_test._batch_dml_executor = mock_batch_dml_executor + + self._under_test.abort_batch() + + self.assertEqual(self._under_test._batch_mode, BatchMode.NONE) + self.assertEqual(self._under_test._batch_dml_executor, None) + @mock.patch("google.cloud.spanner_v1.database.Database", autospec=True) def test_run_prior_DDL_statements(self, mock_database): from google.cloud.spanner_dbapi import Connection, InterfaceError @@ -385,10 +466,37 @@ def test_as_context_manager(self): self.assertTrue(connection.is_closed) + def test_begin_cursor_closed(self): + self._under_test.close() + + with self.assertRaises(InterfaceError): + self._under_test.begin() + + self.assertEqual(self._under_test._transaction_begin_marked, False) + + def 
test_begin_transaction_begin_marked(self): + self._under_test._transaction_begin_marked = True + + with self.assertRaises(OperationalError): + self._under_test.begin() + + def test_begin_transaction_started(self): + self._under_test._spanner_transaction_started = True + + with self.assertRaises(OperationalError): + self._under_test.begin() + + self.assertEqual(self._under_test._transaction_begin_marked, False) + + def test_begin(self): + self._under_test.begin() + + self.assertEqual(self._under_test._transaction_begin_marked, True) + def test_run_statement_wo_retried(self): """Check that Connection remembers executed statements.""" from google.cloud.spanner_dbapi.checksum import ResultsChecksum - from google.cloud.spanner_dbapi.cursor import Statement + from google.cloud.spanner_dbapi.parsed_statement import Statement sql = """SELECT 23 FROM table WHERE id = @a1""" params = {"a1": "value"} @@ -407,7 +515,7 @@ def test_run_statement_wo_retried(self): def test_run_statement_w_retried(self): """Check that Connection doesn't remember re-executed statements.""" from google.cloud.spanner_dbapi.checksum import ResultsChecksum - from google.cloud.spanner_dbapi.cursor import Statement + from google.cloud.spanner_dbapi.parsed_statement import Statement sql = """SELECT 23 FROM table WHERE id = @a1""" params = {"a1": "value"} @@ -423,7 +531,7 @@ def test_run_statement_w_retried(self): def test_run_statement_w_heterogenous_insert_statements(self): """Check that Connection executed heterogenous insert statements.""" from google.cloud.spanner_dbapi.checksum import ResultsChecksum - from google.cloud.spanner_dbapi.cursor import Statement + from google.cloud.spanner_dbapi.parsed_statement import Statement from google.rpc.status_pb2 import Status from google.rpc.code_pb2 import OK @@ -444,7 +552,7 @@ def test_run_statement_w_heterogenous_insert_statements(self): def test_run_statement_w_homogeneous_insert_statements(self): """Check that Connection executed homogeneous insert statements.""" from google.cloud.spanner_dbapi.checksum import ResultsChecksum - from google.cloud.spanner_dbapi.cursor import Statement + from google.cloud.spanner_dbapi.parsed_statement import Statement from google.rpc.status_pb2 import Status from google.rpc.code_pb2 import OK @@ -469,7 +577,8 @@ def test_commit_clears_statements(self, mock_transaction): cleared, when the transaction is commited. """ connection = self._make_connection() - connection._transaction = mock.Mock(rolled_back=False, committed=False) + connection._spanner_transaction_started = True + connection._transaction = mock.Mock() connection._statements = [{}, {}] self.assertEqual(len(connection._statements), 2) @@ -485,7 +594,8 @@ def test_rollback_clears_statements(self, mock_transaction): cleared, when the transaction is roll backed. 
""" connection = self._make_connection() - connection._transaction = mock.Mock() + connection._spanner_transaction_started = True + connection._transaction = mock_transaction connection._statements = [{}, {}] self.assertEqual(len(connection._statements), 2) @@ -497,7 +607,7 @@ def test_rollback_clears_statements(self, mock_transaction): def test_retry_transaction_w_checksum_match(self): """Check retrying an aborted transaction.""" from google.cloud.spanner_dbapi.checksum import ResultsChecksum - from google.cloud.spanner_dbapi.cursor import Statement + from google.cloud.spanner_dbapi.parsed_statement import Statement row = ["field1", "field2"] connection = self._make_connection() @@ -526,7 +636,7 @@ def test_retry_transaction_w_checksum_mismatch(self): """ from google.cloud.spanner_dbapi.exceptions import RetryAborted from google.cloud.spanner_dbapi.checksum import ResultsChecksum - from google.cloud.spanner_dbapi.cursor import Statement + from google.cloud.spanner_dbapi.parsed_statement import Statement row = ["field1", "field2"] retried_row = ["field3", "field4"] @@ -550,7 +660,7 @@ def test_commit_retry_aborted_statements(self, mock_client): from google.api_core.exceptions import Aborted from google.cloud.spanner_dbapi.checksum import ResultsChecksum from google.cloud.spanner_dbapi.connection import connect - from google.cloud.spanner_dbapi.cursor import Statement + from google.cloud.spanner_dbapi.parsed_statement import Statement row = ["field1", "field2"] @@ -562,7 +672,8 @@ def test_commit_retry_aborted_statements(self, mock_client): statement = Statement("SELECT 1", [], {}, cursor._checksum) connection._statements.append(statement) - mock_transaction = mock.Mock(rolled_back=False, committed=False) + mock_transaction = mock.Mock() + connection._spanner_transaction_started = True connection._transaction = mock_transaction mock_transaction.commit.side_effect = [Aborted("Aborted"), None] run_mock = connection.run_statement = mock.Mock() @@ -572,20 +683,6 @@ def test_commit_retry_aborted_statements(self, mock_client): run_mock.assert_called_with(statement, retried=True) - def test_retry_transaction_drop_transaction(self): - """ - Check that before retrying an aborted transaction - connection drops the original aborted transaction. 
- """ - connection = self._make_connection() - transaction_mock = mock.Mock() - connection._transaction = transaction_mock - - # as we didn't set any statements, the method - # will only drop the transaction object - connection.retry_transaction() - self.assertIsNone(connection._transaction) - @mock.patch("google.cloud.spanner_v1.Client") def test_retry_aborted_retry(self, mock_client): """ @@ -595,7 +692,7 @@ def test_retry_aborted_retry(self, mock_client): from google.api_core.exceptions import Aborted from google.cloud.spanner_dbapi.checksum import ResultsChecksum from google.cloud.spanner_dbapi.connection import connect - from google.cloud.spanner_dbapi.cursor import Statement + from google.cloud.spanner_dbapi.parsed_statement import Statement row = ["field1", "field2"] @@ -628,7 +725,7 @@ def test_retry_transaction_raise_max_internal_retries(self): """Check retrying raise an error of max internal retries.""" from google.cloud.spanner_dbapi import connection as conn from google.cloud.spanner_dbapi.checksum import ResultsChecksum - from google.cloud.spanner_dbapi.cursor import Statement + from google.cloud.spanner_dbapi.parsed_statement import Statement conn.MAX_INTERNAL_RETRIES = 0 row = ["field1", "field2"] @@ -654,7 +751,7 @@ def test_retry_aborted_retry_without_delay(self, mock_client): from google.api_core.exceptions import Aborted from google.cloud.spanner_dbapi.checksum import ResultsChecksum from google.cloud.spanner_dbapi.connection import connect - from google.cloud.spanner_dbapi.cursor import Statement + from google.cloud.spanner_dbapi.parsed_statement import Statement row = ["field1", "field2"] @@ -687,7 +784,7 @@ def test_retry_aborted_retry_without_delay(self, mock_client): def test_retry_transaction_w_multiple_statement(self): """Check retrying an aborted transaction.""" from google.cloud.spanner_dbapi.checksum import ResultsChecksum - from google.cloud.spanner_dbapi.cursor import Statement + from google.cloud.spanner_dbapi.parsed_statement import Statement row = ["field1", "field2"] connection = self._make_connection() @@ -715,7 +812,7 @@ def test_retry_transaction_w_multiple_statement(self): def test_retry_transaction_w_empty_response(self): """Check retrying an aborted transaction.""" from google.cloud.spanner_dbapi.checksum import ResultsChecksum - from google.cloud.spanner_dbapi.cursor import Statement + from google.cloud.spanner_dbapi.parsed_statement import Statement row = [] connection = self._make_connection() @@ -832,7 +929,8 @@ def test_staleness_inside_transaction(self): option if a transaction is in progress. """ connection = self._make_connection() - connection._transaction = mock.Mock(committed=False, rolled_back=False) + connection._spanner_transaction_started = True + connection._transaction = mock.Mock() with self.assertRaises(ValueError): connection.staleness = {"read_timestamp": datetime.datetime(2021, 9, 21)} @@ -860,7 +958,8 @@ def test_staleness_multi_use(self): "session", multi_use=True, read_timestamp=timestamp ) - def test_staleness_single_use_autocommit(self): + @mock.patch("google.cloud.spanner_dbapi.cursor.PeekIterator") + def test_staleness_single_use_autocommit(self, MockedPeekIterator): """ Check that `staleness` option is correctly sent to the snapshot context manager. 
@@ -877,7 +976,8 @@ def test_staleness_single_use_autocommit(self): # mock snapshot context manager snapshot_obj = mock.Mock() - snapshot_obj.execute_sql = mock.Mock(return_value=[1]) + _result_set = mock.Mock() + snapshot_obj.execute_sql.return_value = _result_set snapshot_ctx = mock.Mock() snapshot_ctx.__enter__ = mock.Mock(return_value=snapshot_obj) @@ -891,7 +991,8 @@ def test_staleness_single_use_autocommit(self): connection.database.snapshot.assert_called_with(read_timestamp=timestamp) - def test_staleness_single_use_readonly_autocommit(self): + @mock.patch("google.cloud.spanner_dbapi.cursor.PeekIterator") + def test_staleness_single_use_readonly_autocommit(self, MockedPeekIterator): """ Check that `staleness` option is correctly sent to the snapshot context manager while in `autocommit` mode. @@ -909,7 +1010,8 @@ def test_staleness_single_use_readonly_autocommit(self): # mock snapshot context manager snapshot_obj = mock.Mock() - snapshot_obj.execute_sql = mock.Mock(return_value=[1]) + _result_set = mock.Mock() + snapshot_obj.execute_sql.return_value = _result_set snapshot_ctx = mock.Mock() snapshot_ctx.__enter__ = mock.Mock(return_value=snapshot_obj) @@ -925,7 +1027,7 @@ def test_staleness_single_use_readonly_autocommit(self): def test_request_priority(self): from google.cloud.spanner_dbapi.checksum import ResultsChecksum - from google.cloud.spanner_dbapi.cursor import Statement + from google.cloud.spanner_dbapi.parsed_statement import Statement from google.cloud.spanner_v1 import RequestOptions sql = "SELECT 1" @@ -934,7 +1036,8 @@ def test_request_priority(self): priority = 2 connection = self._make_connection() - connection._transaction = mock.Mock(committed=False, rolled_back=False) + connection._spanner_transaction_started = True + connection._transaction = mock.Mock() connection._transaction.execute_sql = mock.Mock() connection.request_priority = priority diff --git a/tests/unit/spanner_dbapi/test_cursor.py b/tests/unit/spanner_dbapi/test_cursor.py index f744fc769f..3328b0e17f 100644 --- a/tests/unit/spanner_dbapi/test_cursor.py +++ b/tests/unit/spanner_dbapi/test_cursor.py @@ -13,14 +13,18 @@ # limitations under the License. 
"""Cursor() class unit tests.""" - -import mock +from unittest import mock import sys import unittest +from google.cloud.spanner_dbapi.parsed_statement import ( + ParsedStatement, + StatementType, + Statement, +) -class TestCursor(unittest.TestCase): +class TestCursor(unittest.TestCase): INSTANCE = "test-instance" DATABASE = "test-database" @@ -106,7 +110,7 @@ def test_do_execute_update(self): result_set.stats = ResultSetStats(row_count_exact=1234) transaction.execute_sql.return_value = result_set - cursor._do_execute_update( + cursor._do_execute_update_in_autocommit( transaction=transaction, sql="SELECT * WHERE true", params={}, @@ -183,7 +187,6 @@ def test_execute_autocommit_off(self): self.assertIsInstance(cursor._itr, PeekIterator) def test_execute_insert_statement_autocommit_off(self): - from google.cloud.spanner_dbapi import parse_utils from google.cloud.spanner_dbapi.checksum import ResultsChecksum from google.cloud.spanner_dbapi.utils import PeekIterator @@ -193,54 +196,54 @@ def test_execute_insert_statement_autocommit_off(self): cursor.connection.transaction_checkout = mock.MagicMock(autospec=True) cursor._checksum = ResultsChecksum() + sql = "INSERT INTO django_migrations (app, name, applied) VALUES (%s, %s, %s)" with mock.patch( - "google.cloud.spanner_dbapi.parse_utils.classify_stmt", - return_value=parse_utils.STMT_UPDATING, + "google.cloud.spanner_dbapi.parse_utils.classify_statement", + return_value=ParsedStatement(StatementType.UPDATE, sql), ): with mock.patch( "google.cloud.spanner_dbapi.connection.Connection.run_statement", return_value=(mock.MagicMock(), ResultsChecksum()), ): - cursor.execute( - sql="INSERT INTO django_migrations (app, name, applied) VALUES (%s, %s, %s)" - ) + cursor.execute(sql) self.assertIsInstance(cursor._result_set, mock.MagicMock) self.assertIsInstance(cursor._itr, PeekIterator) def test_execute_statement(self): - from google.cloud.spanner_dbapi import parse_utils - connection = self._make_connection(self.INSTANCE, mock.MagicMock()) cursor = self._make_one(connection) + sql = "sql" with mock.patch( - "google.cloud.spanner_dbapi.parse_utils.classify_stmt", - side_effect=[parse_utils.STMT_DDL, parse_utils.STMT_UPDATING], - ) as mock_classify_stmt: - sql = "sql" + "google.cloud.spanner_dbapi.parse_utils.classify_statement", + side_effect=[ + ParsedStatement(StatementType.DDL, Statement(sql)), + ParsedStatement(StatementType.UPDATE, Statement(sql)), + ], + ) as mockclassify_statement: with self.assertRaises(ValueError): cursor.execute(sql=sql) - mock_classify_stmt.assert_called_with(sql) - self.assertEqual(mock_classify_stmt.call_count, 2) + mockclassify_statement.assert_called_with(sql) + self.assertEqual(mockclassify_statement.call_count, 2) self.assertEqual(cursor.connection._ddl_statements, []) with mock.patch( - "google.cloud.spanner_dbapi.parse_utils.classify_stmt", - return_value=parse_utils.STMT_DDL, - ) as mock_classify_stmt: + "google.cloud.spanner_dbapi.parse_utils.classify_statement", + return_value=ParsedStatement(StatementType.DDL, Statement(sql)), + ) as mockclassify_statement: sql = "sql" cursor.execute(sql=sql) - mock_classify_stmt.assert_called_with(sql) - self.assertEqual(mock_classify_stmt.call_count, 2) + mockclassify_statement.assert_called_with(sql) + self.assertEqual(mockclassify_statement.call_count, 2) self.assertEqual(cursor.connection._ddl_statements, [sql]) with mock.patch( - "google.cloud.spanner_dbapi.parse_utils.classify_stmt", - return_value=parse_utils.STMT_NON_UPDATING, + 
"google.cloud.spanner_dbapi.parse_utils.classify_statement", + return_value=ParsedStatement(StatementType.QUERY, Statement(sql)), ): with mock.patch( "google.cloud.spanner_dbapi.cursor.Cursor._handle_DQL", - return_value=parse_utils.STMT_NON_UPDATING, + return_value=ParsedStatement(StatementType.QUERY, Statement(sql)), ) as mock_handle_ddl: connection.autocommit = True sql = "sql" @@ -248,14 +251,15 @@ def test_execute_statement(self): mock_handle_ddl.assert_called_once_with(sql, None) with mock.patch( - "google.cloud.spanner_dbapi.parse_utils.classify_stmt", - return_value="other_statement", + "google.cloud.spanner_dbapi.parse_utils.classify_statement", + return_value=ParsedStatement(StatementType.UPDATE, Statement(sql)), ): cursor.connection._database = mock_db = mock.MagicMock() mock_db.run_in_transaction = mock_run_in = mock.MagicMock() - sql = "sql" - cursor.execute(sql=sql) - mock_run_in.assert_called_once_with(cursor._do_execute_update, sql, None) + cursor.execute(sql="sql") + mock_run_in.assert_called_once_with( + cursor._do_execute_update_in_autocommit, "sql", None + ) def test_execute_integrity_error(self): from google.api_core import exceptions @@ -265,21 +269,25 @@ def test_execute_integrity_error(self): cursor = self._make_one(connection) with mock.patch( - "google.cloud.spanner_dbapi.parse_utils.classify_stmt", + "google.cloud.spanner_dbapi.parse_utils.classify_statement", side_effect=exceptions.AlreadyExists("message"), ): with self.assertRaises(IntegrityError): cursor.execute(sql="sql") + connection = self._make_connection(self.INSTANCE, mock.MagicMock()) + cursor = self._make_one(connection) with mock.patch( - "google.cloud.spanner_dbapi.parse_utils.classify_stmt", + "google.cloud.spanner_dbapi.parse_utils.classify_statement", side_effect=exceptions.FailedPrecondition("message"), ): with self.assertRaises(IntegrityError): cursor.execute(sql="sql") + connection = self._make_connection(self.INSTANCE, mock.MagicMock()) + cursor = self._make_one(connection) with mock.patch( - "google.cloud.spanner_dbapi.parse_utils.classify_stmt", + "google.cloud.spanner_dbapi.parse_utils.classify_statement", side_effect=exceptions.OutOfRange("message"), ): with self.assertRaises(IntegrityError): @@ -293,7 +301,7 @@ def test_execute_invalid_argument(self): cursor = self._make_one(connection) with mock.patch( - "google.cloud.spanner_dbapi.parse_utils.classify_stmt", + "google.cloud.spanner_dbapi.parse_utils.classify_statement", side_effect=exceptions.InvalidArgument("message"), ): with self.assertRaises(ProgrammingError): @@ -307,7 +315,7 @@ def test_execute_internal_server_error(self): cursor = self._make_one(connection) with mock.patch( - "google.cloud.spanner_dbapi.parse_utils.classify_stmt", + "google.cloud.spanner_dbapi.parse_utils.classify_statement", side_effect=exceptions.InternalServerError("message"), ): with self.assertRaises(OperationalError): @@ -337,6 +345,20 @@ def test_executemany_DLL(self, mock_client): with self.assertRaises(ProgrammingError): cursor.executemany("""DROP DATABASE database_name""", ()) + def test_executemany_client_statement(self): + from google.cloud.spanner_dbapi import connect, ProgrammingError + + connection = connect("test-instance", "test-database") + + cursor = connection.cursor() + + with self.assertRaises(ProgrammingError) as error: + cursor.executemany("""COMMIT TRANSACTION""", ()) + self.assertEqual( + str(error.exception), + "Executing the following operation: COMMIT TRANSACTION, with executemany() method is not allowed.", + ) + 
@mock.patch("google.cloud.spanner_v1.Client") def test_executemany(self, mock_client): from google.cloud.spanner_dbapi import connect @@ -600,12 +622,12 @@ def test_executemany_insert_batch_aborted(self): self.assertEqual( connection._statements[0][0], [ - ( + Statement( """INSERT INTO table (col1, "col2", `col3`, `"col4"`) VALUES (@a0, @a1, @a2, @a3)""", {"a0": 1, "a1": 2, "a2": 3, "a3": 4}, {"a0": INT64, "a1": INT64, "a2": INT64, "a3": INT64}, ), - ( + Statement( """INSERT INTO table (col1, "col2", `col3`, `"col4"`) VALUES (@a0, @a1, @a2, @a3)""", {"a0": 5, "a1": 6, "a2": 7, "a3": 8}, {"a0": INT64, "a1": INT64, "a2": INT64, "a3": INT64}, @@ -732,8 +754,8 @@ def test_setoutputsize(self): with self.assertRaises(exceptions.InterfaceError): cursor.setoutputsize(size=None) - def test_handle_dql(self): - from google.cloud.spanner_dbapi import utils + @mock.patch("google.cloud.spanner_dbapi.cursor.PeekIterator") + def test_handle_dql(self, MockedPeekIterator): from google.cloud.spanner_dbapi.cursor import _UNSET_COUNT connection = self._make_connection(self.INSTANCE, mock.MagicMock()) @@ -742,14 +764,15 @@ def test_handle_dql(self): ) = mock.MagicMock() cursor = self._make_one(connection) - mock_snapshot.execute_sql.return_value = ["0"] + _result_set = mock.Mock() + mock_snapshot.execute_sql.return_value = _result_set cursor._handle_DQL("sql", params=None) - self.assertEqual(cursor._result_set, ["0"]) - self.assertIsInstance(cursor._itr, utils.PeekIterator) + self.assertEqual(cursor._result_set, _result_set) + self.assertEqual(cursor._itr, MockedPeekIterator()) self.assertEqual(cursor._row_count, _UNSET_COUNT) - def test_handle_dql_priority(self): - from google.cloud.spanner_dbapi import utils + @mock.patch("google.cloud.spanner_dbapi.cursor.PeekIterator") + def test_handle_dql_priority(self, MockedPeekIterator): from google.cloud.spanner_dbapi.cursor import _UNSET_COUNT from google.cloud.spanner_v1 import RequestOptions @@ -762,10 +785,11 @@ def test_handle_dql_priority(self): cursor = self._make_one(connection) sql = "sql" - mock_snapshot.execute_sql.return_value = ["0"] + _result_set = mock.Mock() + mock_snapshot.execute_sql.return_value = _result_set cursor._handle_DQL(sql, params=None) - self.assertEqual(cursor._result_set, ["0"]) - self.assertIsInstance(cursor._itr, utils.PeekIterator) + self.assertEqual(cursor._result_set, _result_set) + self.assertEqual(cursor._itr, MockedPeekIterator()) self.assertEqual(cursor._row_count, _UNSET_COUNT) mock_snapshot.execute_sql.assert_called_with( sql, None, None, request_options=RequestOptions(priority=1) @@ -917,7 +941,6 @@ def test_fetchone_retry_aborted(self, mock_client): with mock.patch( "google.cloud.spanner_dbapi.connection.Connection.retry_transaction" ) as retry_mock: - cursor.fetchone() retry_mock.assert_called_with() @@ -948,7 +971,6 @@ def test_fetchone_retry_aborted_statements(self, mock_client): "google.cloud.spanner_dbapi.connection.Connection.run_statement", return_value=([row], ResultsChecksum()), ) as run_mock: - cursor.fetchone() run_mock.assert_called_with(statement, retried=True) @@ -982,7 +1004,6 @@ def test_fetchone_retry_aborted_statements_checksums_mismatch(self, mock_client) "google.cloud.spanner_dbapi.connection.Connection.run_statement", return_value=([row2], ResultsChecksum()), ) as run_mock: - with self.assertRaises(RetryAborted): cursor.fetchone() @@ -1007,7 +1028,6 @@ def test_fetchall_retry_aborted(self, mock_client): with mock.patch( "google.cloud.spanner_dbapi.connection.Connection.retry_transaction" ) as retry_mock: 
- cursor.fetchall() retry_mock.assert_called_with() @@ -1071,7 +1091,6 @@ def test_fetchall_retry_aborted_statements_checksums_mismatch(self, mock_client) "google.cloud.spanner_dbapi.connection.Connection.run_statement", return_value=([row2], ResultsChecksum()), ) as run_mock: - with self.assertRaises(RetryAborted): cursor.fetchall() @@ -1096,7 +1115,6 @@ def test_fetchmany_retry_aborted(self, mock_client): with mock.patch( "google.cloud.spanner_dbapi.connection.Connection.retry_transaction" ) as retry_mock: - cursor.fetchmany() retry_mock.assert_called_with() @@ -1127,7 +1145,6 @@ def test_fetchmany_retry_aborted_statements(self, mock_client): "google.cloud.spanner_dbapi.connection.Connection.run_statement", return_value=([row], ResultsChecksum()), ) as run_mock: - cursor.fetchmany(len(row)) run_mock.assert_called_with(statement, retried=True) @@ -1161,7 +1178,6 @@ def test_fetchmany_retry_aborted_statements_checksums_mismatch(self, mock_client "google.cloud.spanner_dbapi.connection.Connection.run_statement", return_value=([row2], ResultsChecksum()), ) as run_mock: - with self.assertRaises(RetryAborted): cursor.fetchmany(len(row)) diff --git a/tests/unit/spanner_dbapi/test_parse_utils.py b/tests/unit/spanner_dbapi/test_parse_utils.py index ddd1d5572a..de7b9a6dce 100644 --- a/tests/unit/spanner_dbapi/test_parse_utils.py +++ b/tests/unit/spanner_dbapi/test_parse_utils.py @@ -15,55 +15,91 @@ import sys import unittest +from google.cloud.spanner_dbapi.parsed_statement import ( + StatementType, + ParsedStatement, + Statement, + ClientSideStatementType, +) from google.cloud.spanner_v1 import param_types from google.cloud.spanner_v1 import JsonObject +from google.cloud.spanner_dbapi.parse_utils import classify_statement class TestParseUtils(unittest.TestCase): - skip_condition = sys.version_info[0] < 3 skip_message = "Subtests are not supported in Python 2" def test_classify_stmt(self): - from google.cloud.spanner_dbapi.parse_utils import STMT_DDL - from google.cloud.spanner_dbapi.parse_utils import STMT_INSERT - from google.cloud.spanner_dbapi.parse_utils import STMT_NON_UPDATING - from google.cloud.spanner_dbapi.parse_utils import STMT_UPDATING - from google.cloud.spanner_dbapi.parse_utils import classify_stmt - cases = ( - ("SELECT 1", STMT_NON_UPDATING), - ("SELECT s.SongName FROM Songs AS s", STMT_NON_UPDATING), - ("(SELECT s.SongName FROM Songs AS s)", STMT_NON_UPDATING), + ("SELECT 1", StatementType.QUERY), + ("SELECT s.SongName FROM Songs AS s", StatementType.QUERY), + ("(SELECT s.SongName FROM Songs AS s)", StatementType.QUERY), ( "WITH sq AS (SELECT SchoolID FROM Roster) SELECT * from sq", - STMT_NON_UPDATING, + StatementType.QUERY, ), ( "CREATE TABLE django_content_type (id STRING(64) NOT NULL, name STRING(100) " "NOT NULL, app_label STRING(100) NOT NULL, model STRING(100) NOT NULL) PRIMARY KEY(id)", - STMT_DDL, + StatementType.DDL, ), ( "CREATE INDEX SongsBySingerAlbumSongNameDesc ON " "Songs(SingerId, AlbumId, SongName DESC), INTERLEAVE IN Albums", - STMT_DDL, + StatementType.DDL, ), - ("CREATE INDEX SongsBySongName ON Songs(SongName)", STMT_DDL), + ("CREATE INDEX SongsBySongName ON Songs(SongName)", StatementType.DDL), ( "CREATE INDEX AlbumsByAlbumTitle2 ON Albums(AlbumTitle) STORING (MarketingBudget)", - STMT_DDL, + StatementType.DDL, ), - ("CREATE ROLE parent", STMT_DDL), - ("GRANT SELECT ON TABLE Singers TO ROLE parent", STMT_DDL), - ("REVOKE SELECT ON TABLE Singers TO ROLE parent", STMT_DDL), - ("GRANT ROLE parent TO ROLE child", STMT_DDL), - ("INSERT INTO table (col1) VALUES 
(1)", STMT_INSERT), - ("UPDATE table SET col1 = 1 WHERE col1 = NULL", STMT_UPDATING), + ("CREATE ROLE parent", StatementType.DDL), + ("commit", StatementType.CLIENT_SIDE), + ("begin", StatementType.CLIENT_SIDE), + ("start", StatementType.CLIENT_SIDE), + ("begin transaction", StatementType.CLIENT_SIDE), + ("start transaction", StatementType.CLIENT_SIDE), + ("rollback", StatementType.CLIENT_SIDE), + (" commit TRANSACTION ", StatementType.CLIENT_SIDE), + (" rollback TRANSACTION ", StatementType.CLIENT_SIDE), + (" SHOW VARIABLE COMMIT_TIMESTAMP ", StatementType.CLIENT_SIDE), + ("SHOW VARIABLE READ_TIMESTAMP", StatementType.CLIENT_SIDE), + ("GRANT SELECT ON TABLE Singers TO ROLE parent", StatementType.DDL), + ("REVOKE SELECT ON TABLE Singers TO ROLE parent", StatementType.DDL), + ("GRANT ROLE parent TO ROLE child", StatementType.DDL), + ("INSERT INTO table (col1) VALUES (1)", StatementType.INSERT), + ("UPDATE table SET col1 = 1 WHERE col1 = NULL", StatementType.UPDATE), ) for query, want_class in cases: - self.assertEqual(classify_stmt(query), want_class) + self.assertEqual(classify_statement(query).statement_type, want_class) + + def test_partition_query_classify_stmt(self): + parsed_statement = classify_statement( + " PARTITION SELECT s.SongName FROM Songs AS s " + ) + self.assertEqual( + parsed_statement, + ParsedStatement( + StatementType.CLIENT_SIDE, + Statement("PARTITION SELECT s.SongName FROM Songs AS s"), + ClientSideStatementType.PARTITION_QUERY, + ["SELECT s.SongName FROM Songs AS s"], + ), + ) + + def test_run_partition_classify_stmt(self): + parsed_statement = classify_statement(" RUN PARTITION bj2bjb2j2bj2ebbh ") + self.assertEqual( + parsed_statement, + ParsedStatement( + StatementType.CLIENT_SIDE, + Statement("RUN PARTITION bj2bjb2j2bj2ebbh"), + ClientSideStatementType.RUN_PARTITION, + ["bj2bjb2j2bj2ebbh"], + ), + ) @unittest.skipIf(skip_condition, skip_message) def test_sql_pyformat_args_to_spanner(self): @@ -112,7 +148,7 @@ def test_sql_pyformat_args_to_spanner(self): ("SELECT * from t WHERE id=10", {"f1": "app", "f2": "name"}), ), ] - for ((sql_in, params), sql_want) in cases: + for (sql_in, params), sql_want in cases: with self.subTest(sql=sql_in): got_sql, got_named_args = sql_pyformat_args_to_spanner(sql_in, params) want_sql, want_named_args = sql_want diff --git a/tests/unit/spanner_dbapi/test_parser.py b/tests/unit/spanner_dbapi/test_parser.py index dd99f6fa4b..25f51591c2 100644 --- a/tests/unit/spanner_dbapi/test_parser.py +++ b/tests/unit/spanner_dbapi/test_parser.py @@ -17,7 +17,6 @@ class TestParser(unittest.TestCase): - skip_condition = sys.version_info[0] < 3 skip_message = "Subtests are not supported in Python 2" diff --git a/tests/unit/spanner_dbapi/test_types.py b/tests/unit/spanner_dbapi/test_types.py index 8c9dbe6c2b..375dc31853 100644 --- a/tests/unit/spanner_dbapi/test_types.py +++ b/tests/unit/spanner_dbapi/test_types.py @@ -18,7 +18,6 @@ class TestTypes(unittest.TestCase): - TICKS = 1572822862.9782631 + timezone # Sun 03 Nov 2019 23:14:22 UTC def test__date_from_ticks(self): diff --git a/tests/unit/spanner_dbapi/test_utils.py b/tests/unit/spanner_dbapi/test_utils.py index 76c347d402..fadbca1a09 100644 --- a/tests/unit/spanner_dbapi/test_utils.py +++ b/tests/unit/spanner_dbapi/test_utils.py @@ -17,7 +17,6 @@ class TestUtils(unittest.TestCase): - skip_condition = sys.version_info[0] < 3 skip_message = "Subtests are not supported in Python 2" diff --git a/tests/unit/test_batch.py b/tests/unit/test_batch.py index 0199d44033..203c8a0cb5 100644 --- 
a/tests/unit/test_batch.py +++ b/tests/unit/test_batch.py @@ -32,7 +32,6 @@ class _BaseTest(unittest.TestCase): - PROJECT_ID = "project-id" INSTANCE_ID = "instance-id" INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID @@ -414,6 +413,130 @@ class _BailOut(Exception): self.assertEqual(len(batch._mutations), 1) +class TestMutationGroups(_BaseTest, OpenTelemetryBase): + def _getTargetClass(self): + from google.cloud.spanner_v1.batch import MutationGroups + + return MutationGroups + + def test_ctor(self): + session = _Session() + groups = self._make_one(session) + self.assertIs(groups._session, session) + + def test_batch_write_already_committed(self): + from google.cloud.spanner_v1.keyset import KeySet + + keys = [[0], [1], [2]] + keyset = KeySet(keys=keys) + database = _Database() + database.spanner_api = _FauxSpannerAPI(_batch_write_response=[]) + session = _Session(database) + groups = self._make_one(session) + group = groups.group() + group.delete(TABLE_NAME, keyset=keyset) + groups.batch_write() + self.assertSpanAttributes( + "CloudSpanner.BatchWrite", + status=StatusCode.OK, + attributes=dict(BASE_ATTRIBUTES, num_mutation_groups=1), + ) + assert groups.committed + # The second call to batch_write should raise an error. + with self.assertRaises(ValueError): + groups.batch_write() + + def test_batch_write_grpc_error(self): + from google.api_core.exceptions import Unknown + from google.cloud.spanner_v1.keyset import KeySet + + keys = [[0], [1], [2]] + keyset = KeySet(keys=keys) + database = _Database() + database.spanner_api = _FauxSpannerAPI(_rpc_error=True) + session = _Session(database) + groups = self._make_one(session) + group = groups.group() + group.delete(TABLE_NAME, keyset=keyset) + + with self.assertRaises(Unknown): + groups.batch_write() + + self.assertSpanAttributes( + "CloudSpanner.BatchWrite", + status=StatusCode.ERROR, + attributes=dict(BASE_ATTRIBUTES, num_mutation_groups=1), + ) + + def _test_batch_write_with_request_options(self, request_options=None): + import datetime + from google.cloud.spanner_v1 import BatchWriteResponse + from google.cloud._helpers import UTC + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.rpc.status_pb2 import Status + + now = datetime.datetime.utcnow().replace(tzinfo=UTC) + now_pb = _datetime_to_pb_timestamp(now) + status_pb = Status(code=200) + response = BatchWriteResponse( + commit_timestamp=now_pb, indexes=[0], status=status_pb + ) + database = _Database() + api = database.spanner_api = _FauxSpannerAPI(_batch_write_response=[response]) + session = _Session(database) + groups = self._make_one(session) + group = groups.group() + group.insert(TABLE_NAME, COLUMNS, VALUES) + + response_iter = groups.batch_write(request_options) + self.assertEqual(len(response_iter), 1) + self.assertEqual(response_iter[0], response) + + ( + session, + mutation_groups, + actual_request_options, + metadata, + ) = api._batch_request + self.assertEqual(session, self.SESSION_NAME) + self.assertEqual(mutation_groups, groups._mutation_groups) + self.assertEqual( + metadata, + [ + ("google-cloud-resource-prefix", database.name), + ("x-goog-spanner-route-to-leader", "true"), + ], + ) + if request_options is None: + expected_request_options = RequestOptions() + elif type(request_options) is dict: + expected_request_options = RequestOptions(request_options) + else: + expected_request_options = request_options + self.assertEqual(actual_request_options, expected_request_options) + + self.assertSpanAttributes( + 
"CloudSpanner.BatchWrite", + status=StatusCode.OK, + attributes=dict(BASE_ATTRIBUTES, num_mutation_groups=1), + ) + + def test_batch_write_no_request_options(self): + self._test_batch_write_with_request_options() + + def test_batch_write_w_transaction_tag_success(self): + self._test_batch_write_with_request_options( + RequestOptions(transaction_tag="tag-1-1") + ) + + def test_batch_write_w_transaction_tag_dictionary_success(self): + self._test_batch_write_with_request_options({"transaction_tag": "tag-1-1"}) + + def test_batch_write_w_incorrect_tag_dictionary_error(self): + with self.assertRaises(ValueError): + self._test_batch_write_with_request_options({"incorrect_tag": "tag-1-1"}) + + class _Session(object): def __init__(self, database=None, name=TestBatch.SESSION_NAME): self._database = database @@ -426,10 +549,10 @@ class _Database(object): class _FauxSpannerAPI: - _create_instance_conflict = False _instance_not_found = False _committed = None + _batch_request = None _rpc_error = False def __init__(self, **kwargs): @@ -453,3 +576,20 @@ def commit( if self._rpc_error: raise Unknown("error") return self._commit_response + + def batch_write( + self, + request=None, + metadata=None, + ): + from google.api_core.exceptions import Unknown + + self._batch_request = ( + request.session, + request.mutation_groups, + request.request_options, + metadata, + ) + if self._rpc_error: + raise Unknown("error") + return self._batch_write_response diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py index ed79271a96..8fb5b13a9a 100644 --- a/tests/unit/test_client.py +++ b/tests/unit/test_client.py @@ -15,6 +15,7 @@ import unittest import mock +from google.cloud.spanner_v1 import DirectedReadOptions def _make_credentials(): @@ -29,7 +30,6 @@ class _CredentialsWithScopes( class TestClient(unittest.TestCase): - PROJECT = "PROJECT" PATH = "projects/%s" % (PROJECT,) CONFIGURATION_NAME = "config-name" @@ -41,6 +41,17 @@ class TestClient(unittest.TestCase): LABELS = {"test": "true"} TIMEOUT_SECONDS = 80 LEADER_OPTIONS = ["leader1", "leader2"] + DIRECTED_READ_OPTIONS = { + "include_replicas": { + "replica_selections": [ + { + "location": "us-west1", + "type_": DirectedReadOptions.ReplicaSelection.Type.READ_ONLY, + }, + ], + "auto_failover_disabled": True, + }, + } def _get_target_class(self): from google.cloud import spanner @@ -60,6 +71,7 @@ def _constructor_test_helper( query_options=None, expected_query_options=None, route_to_leader_enabled=True, + directed_read_options=None, ): import google.api_core.client_options from google.cloud.spanner_v1 import client as MUT @@ -85,6 +97,7 @@ def _constructor_test_helper( project=self.PROJECT, credentials=creds, query_options=query_options, + directed_read_options=directed_read_options, **kwargs ) @@ -113,6 +126,8 @@ def _constructor_test_helper( self.assertEqual(client.route_to_leader_enabled, route_to_leader_enabled) else: self.assertFalse(client.route_to_leader_enabled) + if directed_read_options is not None: + self.assertEqual(client.directed_read_options, directed_read_options) @mock.patch("google.cloud.spanner_v1.client._get_spanner_emulator_host") @mock.patch("warnings.warn") @@ -226,6 +241,15 @@ def test_constructor_custom_query_options_env_config(self, mock_ver, mock_stats) expected_query_options=expected_query_options, ) + def test_constructor_w_directed_read_options(self): + from google.cloud.spanner_v1 import client as MUT + + expected_scopes = (MUT.SPANNER_ADMIN_SCOPE,) + creds = _make_credentials() + self._constructor_test_helper( + 
expected_scopes, creds, directed_read_options=self.DIRECTED_READ_OPTIONS + ) + def test_constructor_route_to_leader_disbled(self): from google.cloud.spanner_v1 import client as MUT diff --git a/tests/unit/test_database.py b/tests/unit/test_database.py index 5a6abf8084..88e7bf8f66 100644 --- a/tests/unit/test_database.py +++ b/tests/unit/test_database.py @@ -22,7 +22,7 @@ from google.api_core.retry import Retry from google.protobuf.field_mask_pb2 import FieldMask -from google.cloud.spanner_v1 import RequestOptions +from google.cloud.spanner_v1 import RequestOptions, DirectedReadOptions DML_WO_PARAM = """ DELETE FROM citizens @@ -35,6 +35,17 @@ PARAMS = {"age": 30} PARAM_TYPES = {"age": INT64} MODE = 2 # PROFILE +DIRECTED_READ_OPTIONS = { + "include_replicas": { + "replica_selections": [ + { + "location": "us-west1", + "type_": DirectedReadOptions.ReplicaSelection.Type.READ_ONLY, + }, + ], + "auto_failover_disabled": True, + }, +} def _make_credentials(): # pragma: NO COVER @@ -49,7 +60,6 @@ class _CredentialsWithScopes( class _BaseTest(unittest.TestCase): - PROJECT_ID = "project-id" PARENT = "projects/" + PROJECT_ID INSTANCE_ID = "instance-id" @@ -148,14 +158,12 @@ def test_ctor_w_route_to_leader_disbled(self): self.assertFalse(database._route_to_leader_enabled) def test_ctor_w_ddl_statements_non_string(self): - with self.assertRaises(ValueError): self._make_one( self.DATABASE_ID, instance=object(), ddl_statements=[object()] ) def test_ctor_w_ddl_statements_w_create_database(self): - with self.assertRaises(ValueError): self._make_one( self.DATABASE_ID, @@ -199,6 +207,16 @@ def test_ctor_w_encryption_config(self): self.assertIs(database._instance, instance) self.assertEqual(database._encryption_config, encryption_config) + def test_ctor_w_directed_read_options(self): + client = _Client(directed_read_options=DIRECTED_READ_OPTIONS) + instance = _Instance(self.INSTANCE_NAME, client=client) + database = self._make_one( + self.DATABASE_ID, instance, database_role=self.DATABASE_ROLE + ) + self.assertEqual(database.database_id, self.DATABASE_ID) + self.assertIs(database._instance, instance) + self.assertEqual(database._directed_read_options, DIRECTED_READ_OPTIONS) + def test_from_pb_bad_database_name(self): from google.cloud.spanner_admin_database_v1 import Database @@ -365,7 +383,6 @@ def test_default_leader(self): self.assertEqual(database.default_leader, default_leader) def test_spanner_api_property_w_scopeless_creds(self): - client = _Client() client_info = client._client_info = mock.Mock() client_options = client._client_options = mock.Mock() @@ -1235,6 +1252,20 @@ def test_batch(self): self.assertIsInstance(checkout, BatchCheckout) self.assertIs(checkout._database, database) + def test_mutation_groups(self): + from google.cloud.spanner_v1.database import MutationGroupsCheckout + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + pool = _Pool() + session = _Session() + pool.put(session) + database = self._make_one(self.DATABASE_ID, instance, pool=pool) + + checkout = database.mutation_groups() + self.assertIsInstance(checkout, MutationGroupsCheckout) + self.assertIs(checkout._database, database) + def test_batch_snapshot(self): from google.cloud.spanner_v1.database import BatchSnapshot @@ -2107,7 +2138,10 @@ def test__get_snapshot_new_wo_staleness(self): snapshot = session.snapshot.return_value = self._make_snapshot() self.assertIs(batch_txn._get_snapshot(), snapshot) session.snapshot.assert_called_once_with( - read_timestamp=None, exact_staleness=None, 
multi_use=True + read_timestamp=None, + exact_staleness=None, + multi_use=True, + transaction_id=None, ) snapshot.begin.assert_called_once_with() @@ -2119,7 +2153,10 @@ def test__get_snapshot_w_read_timestamp(self): snapshot = session.snapshot.return_value = self._make_snapshot() self.assertIs(batch_txn._get_snapshot(), snapshot) session.snapshot.assert_called_once_with( - read_timestamp=timestamp, exact_staleness=None, multi_use=True + read_timestamp=timestamp, + exact_staleness=None, + multi_use=True, + transaction_id=None, ) snapshot.begin.assert_called_once_with() @@ -2131,7 +2168,10 @@ def test__get_snapshot_w_exact_staleness(self): snapshot = session.snapshot.return_value = self._make_snapshot() self.assertIs(batch_txn._get_snapshot(), snapshot) session.snapshot.assert_called_once_with( - read_timestamp=None, exact_staleness=duration, multi_use=True + read_timestamp=None, + exact_staleness=duration, + multi_use=True, + transaction_id=None, ) snapshot.begin.assert_called_once_with() @@ -2183,6 +2223,7 @@ def test_generate_read_batches_w_max_partitions(self): "keyset": {"all": True}, "index": "", "data_boost_enabled": False, + "directed_read_options": None, } self.assertEqual(len(batches), len(self.TOKENS)) for batch, token in zip(batches, self.TOKENS): @@ -2225,6 +2266,7 @@ def test_generate_read_batches_w_retry_and_timeout_params(self): "keyset": {"all": True}, "index": "", "data_boost_enabled": False, + "directed_read_options": None, } self.assertEqual(len(batches), len(self.TOKENS)) for batch, token in zip(batches, self.TOKENS): @@ -2266,6 +2308,7 @@ def test_generate_read_batches_w_index_w_partition_size_bytes(self): "keyset": {"all": True}, "index": self.INDEX, "data_boost_enabled": False, + "directed_read_options": None, } self.assertEqual(len(batches), len(self.TOKENS)) for batch, token in zip(batches, self.TOKENS): @@ -2307,6 +2350,48 @@ def test_generate_read_batches_w_data_boost_enabled(self): "keyset": {"all": True}, "index": self.INDEX, "data_boost_enabled": True, + "directed_read_options": None, + } + self.assertEqual(len(batches), len(self.TOKENS)) + for batch, token in zip(batches, self.TOKENS): + self.assertEqual(batch["partition"], token) + self.assertEqual(batch["read"], expected_read) + + snapshot.partition_read.assert_called_once_with( + table=self.TABLE, + columns=self.COLUMNS, + keyset=keyset, + index=self.INDEX, + partition_size_bytes=None, + max_partitions=None, + retry=gapic_v1.method.DEFAULT, + timeout=gapic_v1.method.DEFAULT, + ) + + def test_generate_read_batches_w_directed_read_options(self): + keyset = self._make_keyset() + database = self._make_database() + batch_txn = self._make_one(database) + snapshot = batch_txn._snapshot = self._make_snapshot() + snapshot.partition_read.return_value = self.TOKENS + + batches = list( + batch_txn.generate_read_batches( + self.TABLE, + self.COLUMNS, + keyset, + index=self.INDEX, + directed_read_options=DIRECTED_READ_OPTIONS, + ) + ) + + expected_read = { + "table": self.TABLE, + "columns": self.COLUMNS, + "keyset": {"all": True}, + "index": self.INDEX, + "data_boost_enabled": False, + "directed_read_options": DIRECTED_READ_OPTIONS, } self.assertEqual(len(batches), len(self.TOKENS)) for batch, token in zip(batches, self.TOKENS): @@ -2404,6 +2489,7 @@ def test_generate_query_batches_w_max_partitions(self): "sql": sql, "data_boost_enabled": False, "query_options": client._query_options, + "directed_read_options": None, } self.assertEqual(len(batches), len(self.TOKENS)) for batch, token in zip(batches, self.TOKENS): @@ 
-2446,6 +2532,7 @@ def test_generate_query_batches_w_params_w_partition_size_bytes(self): "params": params, "param_types": param_types, "query_options": client._query_options, + "directed_read_options": None, } self.assertEqual(len(batches), len(self.TOKENS)) for batch, token in zip(batches, self.TOKENS): @@ -2493,6 +2580,7 @@ def test_generate_query_batches_w_retry_and_timeout_params(self): "params": params, "param_types": param_types, "query_options": client._query_options, + "directed_read_options": None, } self.assertEqual(len(batches), len(self.TOKENS)) for batch, token in zip(batches, self.TOKENS): @@ -2524,6 +2612,43 @@ def test_generate_query_batches_w_data_boost_enabled(self): "sql": sql, "data_boost_enabled": True, "query_options": client._query_options, + "directed_read_options": None, + } + self.assertEqual(len(batches), len(self.TOKENS)) + for batch, token in zip(batches, self.TOKENS): + self.assertEqual(batch["partition"], token) + self.assertEqual(batch["query"], expected_query) + + snapshot.partition_query.assert_called_once_with( + sql=sql, + params=None, + param_types=None, + partition_size_bytes=None, + max_partitions=None, + retry=gapic_v1.method.DEFAULT, + timeout=gapic_v1.method.DEFAULT, + ) + + def test_generate_query_batches_w_directed_read_options(self): + sql = "SELECT COUNT(*) FROM table_name" + client = _Client(self.PROJECT_ID) + instance = _Instance(self.INSTANCE_NAME, client=client) + database = _Database(self.DATABASE_NAME, instance=instance) + batch_txn = self._make_one(database) + snapshot = batch_txn._snapshot = self._make_snapshot() + snapshot.partition_query.return_value = self.TOKENS + + batches = list( + batch_txn.generate_query_batches( + sql, directed_read_options=DIRECTED_READ_OPTIONS + ) + ) + + expected_query = { + "sql": sql, + "data_boost_enabled": False, + "query_options": client._query_options, + "directed_read_options": DIRECTED_READ_OPTIONS, } self.assertEqual(len(batches), len(self.TOKENS)) for batch, token in zip(batches, self.TOKENS): @@ -2598,6 +2723,30 @@ def test_process_query_batch_w_retry_timeout(self): timeout=2.0, ) + def test_process_query_batch_w_directed_read_options(self): + sql = "SELECT first_name, last_name, email FROM citizens" + token = b"TOKEN" + batch = { + "partition": token, + "query": {"sql": sql, "directed_read_options": DIRECTED_READ_OPTIONS}, + } + database = self._make_database() + batch_txn = self._make_one(database) + snapshot = batch_txn._snapshot = self._make_snapshot() + expected = snapshot.execute_sql.return_value = object() + + found = batch_txn.process_query_batch(batch) + + self.assertIs(found, expected) + + snapshot.execute_sql.assert_called_once_with( + sql=sql, + partition=token, + retry=gapic_v1.method.DEFAULT, + timeout=gapic_v1.method.DEFAULT, + directed_read_options=DIRECTED_READ_OPTIONS, + ) + def test_close_wo_session(self): database = self._make_database() batch_txn = self._make_one(database) @@ -2683,6 +2832,179 @@ def test_process_w_query_batch(self): ) +class TestMutationGroupsCheckout(_BaseTest): + def _get_target_class(self): + from google.cloud.spanner_v1.database import MutationGroupsCheckout + + return MutationGroupsCheckout + + @staticmethod + def _make_spanner_client(): + from google.cloud.spanner_v1 import SpannerClient + + return mock.create_autospec(SpannerClient) + + def test_ctor(self): + from google.cloud.spanner_v1.batch import MutationGroups + + database = _Database(self.DATABASE_NAME) + pool = database._pool = _Pool() + session = _Session(database) + pool.put(session) + 
checkout = self._make_one(database) + self.assertIs(checkout._database, database) + + with checkout as groups: + self.assertIsNone(pool._session) + self.assertIsInstance(groups, MutationGroups) + self.assertIs(groups._session, session) + + self.assertIs(pool._session, session) + + def test_context_mgr_success(self): + import datetime + from google.cloud.spanner_v1._helpers import _make_list_value_pbs + from google.cloud.spanner_v1 import BatchWriteRequest + from google.cloud.spanner_v1 import BatchWriteResponse + from google.cloud.spanner_v1 import Mutation + from google.cloud._helpers import UTC + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.spanner_v1.batch import MutationGroups + from google.rpc.status_pb2 import Status + + now = datetime.datetime.utcnow().replace(tzinfo=UTC) + now_pb = _datetime_to_pb_timestamp(now) + status_pb = Status(code=200) + response = BatchWriteResponse( + commit_timestamp=now_pb, indexes=[0], status=status_pb + ) + database = _Database(self.DATABASE_NAME) + api = database.spanner_api = self._make_spanner_client() + api.batch_write.return_value = [response] + pool = database._pool = _Pool() + session = _Session(database) + pool.put(session) + checkout = self._make_one(database) + + request_options = RequestOptions(transaction_tag=self.TRANSACTION_TAG) + request = BatchWriteRequest( + session=self.SESSION_NAME, + mutation_groups=[ + BatchWriteRequest.MutationGroup( + mutations=[ + Mutation( + insert=Mutation.Write( + table="table", + columns=["col"], + values=_make_list_value_pbs([["val"]]), + ) + ) + ] + ) + ], + request_options=request_options, + ) + with checkout as groups: + self.assertIsNone(pool._session) + self.assertIsInstance(groups, MutationGroups) + self.assertIs(groups._session, session) + group = groups.group() + group.insert("table", ["col"], [["val"]]) + groups.batch_write(request_options) + self.assertEqual(groups.committed, True) + + self.assertIs(pool._session, session) + + api.batch_write.assert_called_once_with( + request=request, + metadata=[ + ("google-cloud-resource-prefix", database.name), + ("x-goog-spanner-route-to-leader", "true"), + ], + ) + + def test_context_mgr_failure(self): + from google.cloud.spanner_v1.batch import MutationGroups + + database = _Database(self.DATABASE_NAME) + pool = database._pool = _Pool() + session = _Session(database) + pool.put(session) + checkout = self._make_one(database) + + class Testing(Exception): + pass + + with self.assertRaises(Testing): + with checkout as groups: + self.assertIsNone(pool._session) + self.assertIsInstance(groups, MutationGroups) + self.assertIs(groups._session, session) + raise Testing() + + self.assertIs(pool._session, session) + + def test_context_mgr_session_not_found_error(self): + from google.cloud.exceptions import NotFound + + database = _Database(self.DATABASE_NAME) + session = _Session(database, name="session-1") + session.exists = mock.MagicMock(return_value=False) + pool = database._pool = _Pool() + new_session = _Session(database, name="session-2") + new_session.create = mock.MagicMock(return_value=[]) + pool._new_session = mock.MagicMock(return_value=new_session) + + pool.put(session) + checkout = self._make_one(database) + + self.assertEqual(pool._session, session) + with self.assertRaises(NotFound): + with checkout as _: + raise NotFound("Session not found") + # Assert that session-1 was removed from pool and new session was added. 
+ self.assertEqual(pool._session, new_session) + + def test_context_mgr_table_not_found_error(self): + from google.cloud.exceptions import NotFound + + database = _Database(self.DATABASE_NAME) + session = _Session(database, name="session-1") + session.exists = mock.MagicMock(return_value=True) + pool = database._pool = _Pool() + pool._new_session = mock.MagicMock(return_value=[]) + + pool.put(session) + checkout = self._make_one(database) + + self.assertEqual(pool._session, session) + with self.assertRaises(NotFound): + with checkout as _: + raise NotFound("Table not found") + # Assert that session-1 was not removed from pool. + self.assertEqual(pool._session, session) + pool._new_session.assert_not_called() + + def test_context_mgr_unknown_error(self): + database = _Database(self.DATABASE_NAME) + session = _Session(database) + pool = database._pool = _Pool() + pool._new_session = mock.MagicMock(return_value=[]) + pool.put(session) + checkout = self._make_one(database) + + class Testing(Exception): + pass + + self.assertEqual(pool._session, session) + with self.assertRaises(Testing): + with checkout as _: + raise Testing("Unknown error.") + # Assert that session-1 was not removed from pool. + self.assertEqual(pool._session, session) + pool._new_session.assert_not_called() + + def _make_instance_api(): from google.cloud.spanner_admin_instance_v1 import InstanceAdminClient @@ -2690,7 +3012,12 @@ def _make_instance_api(): class _Client(object): - def __init__(self, project=TestDatabase.PROJECT_ID, route_to_leader_enabled=True): + def __init__( + self, + project=TestDatabase.PROJECT_ID, + route_to_leader_enabled=True, + directed_read_options=None, + ): from google.cloud.spanner_v1 import ExecuteSqlRequest self.project = project @@ -2701,6 +3028,7 @@ def __init__(self, project=TestDatabase.PROJECT_ID, route_to_leader_enabled=True self._client_options = mock.Mock() self._query_options = ExecuteSqlRequest.QueryOptions(optimizer_version="1") self.route_to_leader_enabled = route_to_leader_enabled + self.directed_read_options = directed_read_options class _Instance(object): @@ -2727,6 +3055,7 @@ def __init__(self, name, instance=None): from logging import Logger self.logger = mock.create_autospec(Logger, instance=True) + self._directed_read_options = None class _Pool(object): @@ -2744,7 +3073,6 @@ def put(self, session): class _Session(object): - _rows = () _created = False _transaction = None diff --git a/tests/unit/test_instance.py b/tests/unit/test_instance.py index 0a7dbccb81..2313ee3131 100644 --- a/tests/unit/test_instance.py +++ b/tests/unit/test_instance.py @@ -17,7 +17,6 @@ class TestInstance(unittest.TestCase): - PROJECT = "project" PARENT = "projects/" + PROJECT INSTANCE_ID = "instance-id" @@ -1016,6 +1015,7 @@ def __init__(self, project, timeout_seconds=None): self.project_name = "projects/" + self.project self.timeout_seconds = timeout_seconds self.route_to_leader_enabled = True + self.directed_read_options = None def copy(self): from copy import deepcopy @@ -1031,7 +1031,6 @@ def __eq__(self, other): class _FauxInstanceAdminAPI(object): - _create_instance_conflict = False _instance_not_found = False _rpc_error = False diff --git a/tests/unit/test_keyset.py b/tests/unit/test_keyset.py index a7bad4070d..8fc743e075 100644 --- a/tests/unit/test_keyset.py +++ b/tests/unit/test_keyset.py @@ -205,7 +205,6 @@ def test_ctor_w_ranges(self): self.assertEqual(keyset.ranges, [range_1, range_2]) def test_ctor_w_all_and_keys(self): - with self.assertRaises(ValueError): self._make_one(all_=True, 
keys=[["key1"], ["key2"]]) diff --git a/tests/unit/test_packaging.py b/tests/unit/test_packaging.py new file mode 100644 index 0000000000..998a02ac2d --- /dev/null +++ b/tests/unit/test_packaging.py @@ -0,0 +1,37 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import subprocess +import sys + + +def test_namespace_package_compat(tmp_path): + # The ``google`` namespace package should not be masked + # by the presence of ``google-cloud-spanner``. + google = tmp_path / "google" + google.mkdir() + google.joinpath("othermod.py").write_text("") + env = dict(os.environ, PYTHONPATH=str(tmp_path)) + cmd = [sys.executable, "-m", "google.othermod"] + subprocess.check_call(cmd, env=env) + + # The ``google.cloud`` namespace package should not be masked + # by the presence of ``google-cloud-spanner``. + google_cloud = tmp_path / "google" / "cloud" + google_cloud.mkdir() + google_cloud.joinpath("othermod.py").write_text("") + env = dict(os.environ, PYTHONPATH=str(tmp_path)) + cmd = [sys.executable, "-m", "google.cloud.othermod"] + subprocess.check_call(cmd, env=env) diff --git a/tests/unit/test_pool.py b/tests/unit/test_pool.py index 58665634de..23ed3e7251 100644 --- a/tests/unit/test_pool.py +++ b/tests/unit/test_pool.py @@ -913,7 +913,6 @@ def _make_transaction(*args, **kw): @total_ordering class _Session(object): - _transaction = None def __init__(self, database, exists=True, transaction=None): @@ -1004,7 +1003,6 @@ def session(self, **kwargs): class _Queue(object): - _size = 1 def __init__(self, *items): @@ -1035,5 +1033,4 @@ def put_nowait(self, item, **kwargs): class _Pool(_Queue): - _database = None diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py index 3125e33f21..0bb02ebdc7 100644 --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -37,7 +37,6 @@ def time(self): class TestSession(OpenTelemetryBase): - PROJECT_ID = "project-id" INSTANCE_ID = "instance-id" INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID diff --git a/tests/unit/test_snapshot.py b/tests/unit/test_snapshot.py index 5d2afb4fe6..aec20c2f54 100644 --- a/tests/unit/test_snapshot.py +++ b/tests/unit/test_snapshot.py @@ -16,7 +16,7 @@ from google.api_core import gapic_v1 import mock -from google.cloud.spanner_v1 import RequestOptions +from google.cloud.spanner_v1 import RequestOptions, DirectedReadOptions from tests._helpers import ( OpenTelemetryBase, StatusCode, @@ -46,6 +46,33 @@ "db.instance": "testing", "net.host.name": "spanner.googleapis.com", } +DIRECTED_READ_OPTIONS = { + "include_replicas": { + "replica_selections": [ + { + "location": "us-west1", + "type_": DirectedReadOptions.ReplicaSelection.Type.READ_ONLY, + }, + ], + "auto_failover_disabled": True, + }, +} +DIRECTED_READ_OPTIONS_FOR_CLIENT = { + "include_replicas": { + "replica_selections": [ + { + "location": "us-east1", + }, + ], + }, +} + + +def _makeTimestamp(): + import datetime + from google.cloud._helpers import UTC + + return 
datetime.datetime.utcnow().replace(tzinfo=UTC) class Test_restart_on_unavailable(OpenTelemetryBase): @@ -56,7 +83,6 @@ def _getTargetClass(self): def _makeDerived(self, session): class _Derived(self._getTargetClass()): - _transaction_id = None _multi_use = False @@ -514,7 +540,6 @@ def test_iteration_w_multiple_span_creation(self): class Test_SnapshotBase(OpenTelemetryBase): - PROJECT_ID = "project-id" INSTANCE_ID = "instance-id" INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID @@ -533,7 +558,6 @@ def _make_one(self, session): def _makeDerived(self, session): class _Derived(self._getTargetClass()): - _transaction_id = None _multi_use = False @@ -603,6 +627,8 @@ def _read_helper( timeout=gapic_v1.method.DEFAULT, retry=gapic_v1.method.DEFAULT, request_options=None, + directed_read_options=None, + directed_read_options_at_client_level=None, ): from google.protobuf.struct_pb2 import Struct from google.cloud.spanner_v1 import ( @@ -642,7 +668,9 @@ def _read_helper( keyset = KeySet(keys=KEYS) INDEX = "email-address-index" LIMIT = 20 - database = _Database() + database = _Database( + directed_read_options=directed_read_options_at_client_level + ) api = database.spanner_api = self._make_spanner_api() api.streaming_read.return_value = _MockIterator(*result_sets) session = _Session(database) @@ -667,6 +695,7 @@ def _read_helper( retry=retry, timeout=timeout, request_options=request_options, + directed_read_options=directed_read_options, ) else: result_set = derived.read( @@ -678,6 +707,7 @@ def _read_helper( retry=retry, timeout=timeout, request_options=request_options, + directed_read_options=directed_read_options, ) self.assertEqual(derived._read_request_count, count + 1) @@ -712,6 +742,12 @@ def _read_helper( expected_request_options = request_options expected_request_options.transaction_tag = None + expected_directed_read_options = ( + directed_read_options + if directed_read_options is not None + else directed_read_options_at_client_level + ) + expected_request = ReadRequest( session=self.SESSION_NAME, table=TABLE_NAME, @@ -722,6 +758,7 @@ def _read_helper( limit=expected_limit, partition_token=partition, request_options=expected_request_options, + directed_read_options=expected_directed_read_options, ) api.streaming_read.assert_called_once_with( request=expected_request, @@ -797,6 +834,22 @@ def test_read_w_timeout_and_retry_params(self): multi_use=True, first=False, retry=Retry(deadline=60), timeout=2.0 ) + def test_read_w_directed_read_options(self): + self._read_helper(multi_use=False, directed_read_options=DIRECTED_READ_OPTIONS) + + def test_read_w_directed_read_options_at_client_level(self): + self._read_helper( + multi_use=False, + directed_read_options_at_client_level=DIRECTED_READ_OPTIONS_FOR_CLIENT, + ) + + def test_read_w_directed_read_options_override(self): + self._read_helper( + multi_use=False, + directed_read_options=DIRECTED_READ_OPTIONS, + directed_read_options_at_client_level=DIRECTED_READ_OPTIONS_FOR_CLIENT, + ) + def test_execute_sql_other_error(self): database = _Database() database.spanner_api = self._make_spanner_api() @@ -836,6 +889,8 @@ def _execute_sql_helper( request_options=None, timeout=gapic_v1.method.DEFAULT, retry=gapic_v1.method.DEFAULT, + directed_read_options=None, + directed_read_options_at_client_level=None, ): from google.protobuf.struct_pb2 import Struct from google.cloud.spanner_v1 import ( @@ -876,7 +931,9 @@ def _execute_sql_helper( for i in range(len(result_sets)): result_sets[i].values.extend(VALUE_PBS[i]) iterator = 
_MockIterator(*result_sets) - database = _Database() + database = _Database( + directed_read_options=directed_read_options_at_client_level + ) api = database.spanner_api = self._make_spanner_api() api.execute_streaming_sql.return_value = iterator session = _Session(database) @@ -902,6 +959,7 @@ def _execute_sql_helper( partition=partition, retry=retry, timeout=timeout, + directed_read_options=directed_read_options, ) self.assertEqual(derived._read_request_count, count + 1) @@ -942,6 +1000,12 @@ def _execute_sql_helper( expected_request_options = request_options expected_request_options.transaction_tag = None + expected_directed_read_options = ( + directed_read_options + if directed_read_options is not None + else directed_read_options_at_client_level + ) + expected_request = ExecuteSqlRequest( session=self.SESSION_NAME, sql=SQL_QUERY_WITH_PARAM, @@ -953,6 +1017,7 @@ def _execute_sql_helper( request_options=expected_request_options, partition_token=partition, seqno=sql_count, + directed_read_options=expected_directed_read_options, ) api.execute_streaming_sql.assert_called_once_with( request=expected_request, @@ -1039,6 +1104,24 @@ def test_execute_sql_w_incorrect_tag_dictionary_error(self): with self.assertRaises(ValueError): self._execute_sql_helper(multi_use=False, request_options=request_options) + def test_execute_sql_w_directed_read_options(self): + self._execute_sql_helper( + multi_use=False, directed_read_options=DIRECTED_READ_OPTIONS + ) + + def test_execute_sql_w_directed_read_options_at_client_level(self): + self._execute_sql_helper( + multi_use=False, + directed_read_options_at_client_level=DIRECTED_READ_OPTIONS_FOR_CLIENT, + ) + + def test_execute_sql_w_directed_read_options_override(self): + self._execute_sql_helper( + multi_use=False, + directed_read_options=DIRECTED_READ_OPTIONS, + directed_read_options_at_client_level=DIRECTED_READ_OPTIONS_FOR_CLIENT, + ) + def _partition_read_helper( self, multi_use, @@ -1358,7 +1441,6 @@ def test_partition_query_ok_w_timeout_and_retry_params(self): class TestSnapshot(OpenTelemetryBase): - PROJECT_ID = "project-id" INSTANCE_ID = "instance-id" INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID @@ -1380,12 +1462,6 @@ def _make_spanner_api(self): return mock.create_autospec(SpannerClient, instance=True) - def _makeTimestamp(self): - import datetime - from google.cloud._helpers import UTC - - return datetime.datetime.utcnow().replace(tzinfo=UTC) - def _makeDuration(self, seconds=1, microseconds=0): import datetime @@ -1403,7 +1479,7 @@ def test_ctor_defaults(self): self.assertFalse(snapshot._multi_use) def test_ctor_w_multiple_options(self): - timestamp = self._makeTimestamp() + timestamp = _makeTimestamp() duration = self._makeDuration() session = _Session() @@ -1411,7 +1487,7 @@ def test_ctor_w_multiple_options(self): self._make_one(session, read_timestamp=timestamp, max_staleness=duration) def test_ctor_w_read_timestamp(self): - timestamp = self._makeTimestamp() + timestamp = _makeTimestamp() session = _Session() snapshot = self._make_one(session, read_timestamp=timestamp) self.assertIs(snapshot._session, session) @@ -1423,7 +1499,7 @@ def test_ctor_w_read_timestamp(self): self.assertFalse(snapshot._multi_use) def test_ctor_w_min_read_timestamp(self): - timestamp = self._makeTimestamp() + timestamp = _makeTimestamp() session = _Session() snapshot = self._make_one(session, min_read_timestamp=timestamp) self.assertIs(snapshot._session, session) @@ -1470,7 +1546,7 @@ def test_ctor_w_multi_use(self): 
self.assertTrue(snapshot._multi_use) def test_ctor_w_multi_use_and_read_timestamp(self): - timestamp = self._makeTimestamp() + timestamp = _makeTimestamp() session = _Session() snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True) self.assertTrue(snapshot._session is session) @@ -1482,7 +1558,7 @@ def test_ctor_w_multi_use_and_read_timestamp(self): self.assertTrue(snapshot._multi_use) def test_ctor_w_multi_use_and_min_read_timestamp(self): - timestamp = self._makeTimestamp() + timestamp = _makeTimestamp() session = _Session() with self.assertRaises(ValueError): @@ -1524,7 +1600,7 @@ def test__make_txn_selector_strong(self): def test__make_txn_selector_w_read_timestamp(self): from google.cloud._helpers import _pb_timestamp_to_datetime - timestamp = self._makeTimestamp() + timestamp = _makeTimestamp() session = _Session() snapshot = self._make_one(session, read_timestamp=timestamp) selector = snapshot._make_txn_selector() @@ -1539,7 +1615,7 @@ def test__make_txn_selector_w_read_timestamp(self): def test__make_txn_selector_w_min_read_timestamp(self): from google.cloud._helpers import _pb_timestamp_to_datetime - timestamp = self._makeTimestamp() + timestamp = _makeTimestamp() session = _Session() snapshot = self._make_one(session, min_read_timestamp=timestamp) selector = snapshot._make_txn_selector() @@ -1583,7 +1659,7 @@ def test__make_txn_selector_strong_w_multi_use(self): def test__make_txn_selector_w_read_timestamp_w_multi_use(self): from google.cloud._helpers import _pb_timestamp_to_datetime - timestamp = self._makeTimestamp() + timestamp = _makeTimestamp() session = _Session() snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True) selector = snapshot._make_txn_selector() @@ -1630,7 +1706,7 @@ def test_begin_w_other_error(self): database = _Database() database.spanner_api = self._make_spanner_api() database.spanner_api.begin_transaction.side_effect = RuntimeError() - timestamp = self._makeTimestamp() + timestamp = _makeTimestamp() session = _Session(database) snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True) @@ -1655,7 +1731,7 @@ def test_begin_w_retry(self): InternalServerError("Received unexpected EOS on DATA frame from server"), TransactionPB(id=TXN_ID), ] - timestamp = self._makeTimestamp() + timestamp = _makeTimestamp() session = _Session(database) snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True) @@ -1684,7 +1760,9 @@ def test_begin_ok_exact_staleness(self): expected_duration = Duration(seconds=SECONDS, nanos=MICROS * 1000) expected_txn_options = TransactionOptions( - read_only=TransactionOptions.ReadOnly(exact_staleness=expected_duration) + read_only=TransactionOptions.ReadOnly( + exact_staleness=expected_duration, return_read_timestamp=True + ) ) api.begin_transaction.assert_called_once_with( @@ -1718,7 +1796,9 @@ def test_begin_ok_exact_strong(self): self.assertEqual(snapshot._transaction_id, TXN_ID) expected_txn_options = TransactionOptions( - read_only=TransactionOptions.ReadOnly(strong=True) + read_only=TransactionOptions.ReadOnly( + strong=True, return_read_timestamp=True + ) ) api.begin_transaction.assert_called_once_with( @@ -1747,10 +1827,11 @@ def __init__(self): class _Database(object): - def __init__(self): + def __init__(self, directed_read_options=None): self.name = "testing" self._instance = _Instance() self._route_to_leader_enabled = True + self._directed_read_options = directed_read_options class _Session(object): diff --git a/tests/unit/test_spanner.py 
b/tests/unit/test_spanner.py index e4cd1e84cd..3663d8bdc9 100644 --- a/tests/unit/test_spanner.py +++ b/tests/unit/test_spanner.py @@ -28,6 +28,7 @@ StructType, TransactionOptions, TransactionSelector, + DirectedReadOptions, ExecuteBatchDmlRequest, ExecuteBatchDmlResponse, param_types, @@ -73,7 +74,17 @@ MODE = 2 RETRY = gapic_v1.method.DEFAULT TIMEOUT = gapic_v1.method.DEFAULT -REQUEST_OPTIONS = RequestOptions() +DIRECTED_READ_OPTIONS = { + "include_replicas": { + "replica_selections": [ + { + "location": "us-west1", + "type_": DirectedReadOptions.ReplicaSelection.Type.READ_ONLY, + }, + ], + "auto_failover_disabled": True, + }, +} insert_dml = "INSERT INTO table(pkey, desc) VALUES (%pkey, %desc)" insert_params = {"pkey": 12345, "desc": "DESCRIPTION"} insert_param_types = {"pkey": param_types.INT64, "desc": param_types.STRING} @@ -88,7 +99,6 @@ class TestTransaction(OpenTelemetryBase): - PROJECT_ID = "project-id" INSTANCE_ID = "instance-id" INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID @@ -143,7 +153,7 @@ def _execute_update_helper( PARAM_TYPES, query_mode=MODE, query_options=query_options, - request_options=REQUEST_OPTIONS, + request_options=RequestOptions(), retry=RETRY, timeout=TIMEOUT, ) @@ -168,7 +178,7 @@ def _execute_update_expected_request( expected_query_options = _merge_query_options( expected_query_options, query_options ) - expected_request_options = REQUEST_OPTIONS + expected_request_options = RequestOptions() expected_request_options.transaction_tag = self.TRANSACTION_TAG expected_request = ExecuteSqlRequest( @@ -193,6 +203,7 @@ def _execute_sql_helper( partition=None, sql_count=0, query_options=None, + directed_read_options=None, ): VALUES = [["bharney", "rhubbyl", 31], ["phred", "phlyntstone", 32]] VALUE_PBS = [[_make_value_pb(item) for item in row] for row in VALUES] @@ -227,10 +238,11 @@ def _execute_sql_helper( PARAM_TYPES, query_mode=MODE, query_options=query_options, - request_options=REQUEST_OPTIONS, + request_options=RequestOptions(), partition=partition, retry=RETRY, timeout=TIMEOUT, + directed_read_options=directed_read_options, ) self.assertEqual(transaction._read_request_count, count + 1) @@ -241,7 +253,14 @@ def _execute_sql_helper( self.assertEqual(transaction._execute_sql_count, sql_count + 1) def _execute_sql_expected_request( - self, database, partition=None, query_options=None, begin=True, sql_count=0 + self, + database, + partition=None, + query_options=None, + begin=True, + sql_count=0, + transaction_tag=False, + directed_read_options=None, ): if begin is True: expected_transaction = TransactionSelector( @@ -260,8 +279,12 @@ def _execute_sql_expected_request( expected_query_options, query_options ) - expected_request_options = REQUEST_OPTIONS - expected_request_options.transaction_tag = self.TRANSACTION_TAG + expected_request_options = RequestOptions() + + if transaction_tag is True: + expected_request_options.transaction_tag = self.TRANSACTION_TAG + else: + expected_request_options.transaction_tag = None expected_request = ExecuteSqlRequest( session=self.SESSION_NAME, @@ -274,6 +297,7 @@ def _execute_sql_expected_request( request_options=expected_request_options, partition_token=partition, seqno=sql_count, + directed_read_options=directed_read_options, ) return expected_request @@ -284,6 +308,7 @@ def _read_helper( api, count=0, partition=None, + directed_read_options=None, ): VALUES = [["bharney", 31], ["phred", 32]] VALUE_PBS = [[_make_value_pb(item) for item in row] for row in VALUES] @@ -321,7 +346,8 @@ def _read_helper( 
partition=partition, retry=RETRY, timeout=TIMEOUT, - request_options=REQUEST_OPTIONS, + request_options=RequestOptions(), + directed_read_options=directed_read_options, ) else: result_set = transaction.read( @@ -332,7 +358,8 @@ def _read_helper( limit=LIMIT, retry=RETRY, timeout=TIMEOUT, - request_options=REQUEST_OPTIONS, + request_options=RequestOptions(), + directed_read_options=directed_read_options, ) self.assertEqual(transaction._read_request_count, count + 1) @@ -343,8 +370,14 @@ def _read_helper( self.assertEqual(result_set.metadata, metadata_pb) self.assertEqual(result_set.stats, stats_pb) - def _read_helper_expected_request(self, partition=None, begin=True, count=0): - + def _read_helper_expected_request( + self, + partition=None, + begin=True, + count=0, + transaction_tag=False, + directed_read_options=None, + ): if begin is True: expected_transaction = TransactionSelector( begin=TransactionOptions(read_write=TransactionOptions.ReadWrite()) @@ -358,8 +391,12 @@ def _read_helper_expected_request(self, partition=None, begin=True, count=0): expected_limit = LIMIT # Transaction tag is ignored for read request. - expected_request_options = REQUEST_OPTIONS - expected_request_options.transaction_tag = self.TRANSACTION_TAG + expected_request_options = RequestOptions() + + if transaction_tag is True: + expected_request_options.transaction_tag = self.TRANSACTION_TAG + else: + expected_request_options.transaction_tag = None expected_request = ReadRequest( session=self.SESSION_NAME, @@ -371,6 +408,7 @@ def _read_helper_expected_request(self, partition=None, begin=True, count=0): limit=expected_limit, partition_token=partition, request_options=expected_request_options, + directed_read_options=directed_read_options, ) return expected_request @@ -412,7 +450,7 @@ def _batch_update_helper( transaction._execute_sql_count = count status, row_counts = transaction.batch_update( - dml_statements, request_options=REQUEST_OPTIONS + dml_statements, request_options=RequestOptions() ) self.assertEqual(status, expected_status) @@ -442,7 +480,7 @@ def _batch_update_expected_request(self, begin=True, count=0): ExecuteBatchDmlRequest.Statement(sql=delete_dml), ] - expected_request_options = REQUEST_OPTIONS + expected_request_options = RequestOptions() expected_request_options.transaction_tag = self.TRANSACTION_TAG expected_request = ExecuteBatchDmlRequest( @@ -597,7 +635,9 @@ def test_transaction_should_use_transaction_id_returned_by_first_update(self): self._execute_sql_helper(transaction=transaction, api=api) api.execute_streaming_sql.assert_called_once_with( - request=self._execute_sql_expected_request(database=database, begin=False), + request=self._execute_sql_expected_request( + database=database, begin=False, transaction_tag=True + ), retry=gapic_v1.method.DEFAULT, timeout=gapic_v1.method.DEFAULT, metadata=[ @@ -606,6 +646,52 @@ def test_transaction_should_use_transaction_id_returned_by_first_update(self): ], ) + def test_transaction_execute_sql_w_directed_read_options(self): + database = _Database() + session = _Session(database) + api = database.spanner_api = self._make_spanner_api() + transaction = self._make_one(session) + + self._execute_sql_helper( + transaction=transaction, + api=api, + directed_read_options=DIRECTED_READ_OPTIONS, + ) + api.execute_streaming_sql.assert_called_once_with( + request=self._execute_sql_expected_request( + database=database, directed_read_options=DIRECTED_READ_OPTIONS + ), + metadata=[ + ("google-cloud-resource-prefix", database.name), + 
("x-goog-spanner-route-to-leader", "true"), + ], + retry=gapic_v1.method.DEFAULT, + timeout=gapic_v1.method.DEFAULT, + ) + + def test_transaction_streaming_read_w_directed_read_options(self): + database = _Database() + session = _Session(database) + api = database.spanner_api = self._make_spanner_api() + transaction = self._make_one(session) + + self._read_helper( + transaction=transaction, + api=api, + directed_read_options=DIRECTED_READ_OPTIONS, + ) + api.streaming_read.assert_called_once_with( + request=self._read_helper_expected_request( + directed_read_options=DIRECTED_READ_OPTIONS + ), + metadata=[ + ("google-cloud-resource-prefix", database.name), + ("x-goog-spanner-route-to-leader", "true"), + ], + retry=RETRY, + timeout=TIMEOUT, + ) + def test_transaction_should_use_transaction_id_returned_by_first_read(self): database = _Database() session = _Session(database) @@ -646,7 +732,9 @@ def test_transaction_should_use_transaction_id_returned_by_first_batch_update(se ) self._read_helper(transaction=transaction, api=api) api.streaming_read.assert_called_once_with( - request=self._read_helper_expected_request(begin=False), + request=self._read_helper_expected_request( + begin=False, transaction_tag=True + ), metadata=[ ("google-cloud-resource-prefix", database.name), ("x-goog-spanner-route-to-leader", "true"), @@ -924,6 +1012,7 @@ def __init__(self): from google.cloud.spanner_v1 import ExecuteSqlRequest self._query_options = ExecuteSqlRequest.QueryOptions(optimizer_version="1") + self.directed_read_options = None class _Instance(object): @@ -936,10 +1025,10 @@ def __init__(self): self.name = "testing" self._instance = _Instance() self._route_to_leader_enabled = True + self._directed_read_options = None class _Session(object): - _transaction = None def __init__(self, database=None, name=TestTransaction.SESSION_NAME): diff --git a/tests/unit/test_streamed.py b/tests/unit/test_streamed.py index 2714ddfb45..85dcb40026 100644 --- a/tests/unit/test_streamed.py +++ b/tests/unit/test_streamed.py @@ -973,7 +973,6 @@ def test___iter___w_existing_rows_read(self): class _MockCancellableIterator(object): - cancel_calls = 0 def __init__(self, *values): @@ -987,7 +986,6 @@ def __next__(self): # pragma: NO COVER Py3k class TestStreamedResultSet_JSON_acceptance_tests(unittest.TestCase): - _json_tests = None def _getTargetClass(self): @@ -1006,7 +1004,7 @@ def _load_json_test(self, test_name): filename = os.path.join(dirname, "streaming-read-acceptance-test.json") raw = _parse_streaming_read_acceptance_tests(filename) tests = self.__class__._json_tests = {} - for (name, partial_result_sets, results) in raw: + for name, partial_result_sets, results in raw: tests[name] = partial_result_sets, results return self.__class__._json_tests[test_name] diff --git a/tests/unit/test_transaction.py b/tests/unit/test_transaction.py index 85359dac19..2d2f208424 100644 --- a/tests/unit/test_transaction.py +++ b/tests/unit/test_transaction.py @@ -42,7 +42,6 @@ class TestTransaction(OpenTelemetryBase): - PROJECT_ID = "project-id" INSTANCE_ID = "instance-id" INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID @@ -895,6 +894,7 @@ def __init__(self): from google.cloud.spanner_v1 import ExecuteSqlRequest self._query_options = ExecuteSqlRequest.QueryOptions(optimizer_version="1") + self.directed_read_options = None class _Instance(object): @@ -907,10 +907,10 @@ def __init__(self): self.name = "testing" self._instance = _Instance() self._route_to_leader_enabled = True + self._directed_read_options = None class 
_Session(object):
-
     _transaction = None
 
     def __init__(self, database=None, name=TestTransaction.SESSION_NAME):
@@ -919,7 +919,6 @@ def __init__(self, database=None, name=TestTransaction.SESSION_NAME):
 
 
 class _FauxSpannerAPI(object):
-
     _committed = None
 
     def __init__(self, **kwargs):