diff --git a/.coveragerc b/.coveragerc
index dd39c8546c..8e75debec9 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -17,6 +17,9 @@
# Generated by synthtool. DO NOT EDIT!
[run]
branch = True
+omit =
+ /tmp/*
+ .nox/*
[report]
fail_under = 100
@@ -29,7 +32,9 @@ exclude_lines =
# Ignore abstract methods
raise NotImplementedError
omit =
+ /tmp/*
+ .nox/*
*/gapic/*.py
*/proto/*.py
*/core/*.py
- */site-packages/*.py
\ No newline at end of file
+ */site-packages/*.py
diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
new file mode 100644
index 0000000000..9d27c2353f
--- /dev/null
+++ b/.devcontainer/Dockerfile
@@ -0,0 +1,16 @@
+ARG VARIANT="3.13"
+FROM mcr.microsoft.com/devcontainers/python:${VARIANT}
+
+# install nox
+COPY requirements.txt /requirements.txt
+RUN python3 -m pip install --upgrade --quiet --require-hashes -r requirements.txt
+
+# install gh
+RUN curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg \
+&& chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg \
+&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
+&& apt-get update \
+&& apt-get install gh -y
+
+# install gcloud sdk
+RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] http://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - && apt-get update -y && apt-get install google-cloud-cli -y
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 0000000000..7b0126cb8a
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,13 @@
+// For format details, see https://aka.ms/devcontainer.json. For config options, see the
+// README at: https://github.com/devcontainers/templates/tree/main/src/python
+{
+ "name": "Python 3",
+ "build": {
+ // Sets the run context to one level up instead of the .devcontainer folder.
+ "args": { "VARIANT": "3.8" },
+ // Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename.
+ "dockerfile": "Dockerfile"
+ },
+
+ "postCreateCommand": "bash .devcontainer/postCreate.sh"
+}
diff --git a/.devcontainer/postCreate.sh b/.devcontainer/postCreate.sh
new file mode 100644
index 0000000000..ee79ebd221
--- /dev/null
+++ b/.devcontainer/postCreate.sh
@@ -0,0 +1,3 @@
+echo "Post Create Starting"
+
+nox -s blacken
\ No newline at end of file
diff --git a/.devcontainer/requirements.in b/.devcontainer/requirements.in
new file mode 100644
index 0000000000..7c41e5e241
--- /dev/null
+++ b/.devcontainer/requirements.in
@@ -0,0 +1 @@
+nox==2024.10.9
\ No newline at end of file
diff --git a/.devcontainer/requirements.txt b/.devcontainer/requirements.txt
new file mode 100644
index 0000000000..ac5aae60d9
--- /dev/null
+++ b/.devcontainer/requirements.txt
@@ -0,0 +1,72 @@
+#
+# This file is autogenerated by pip-compile with Python 3.8
+# by the following command:
+#
+# pip-compile --generate-hashes requirements.in
+#
+argcomplete==3.6.2 \
+ --hash=sha256:65b3133a29ad53fb42c48cf5114752c7ab66c1c38544fdf6460f450c09b42591 \
+ --hash=sha256:d0519b1bc867f5f4f4713c41ad0aba73a4a5f007449716b16f385f2166dc6adf
+ # via nox
+colorlog==6.9.0 \
+ --hash=sha256:5906e71acd67cb07a71e779c47c4bcb45fb8c2993eebe9e5adcd6a6f1b283eff \
+ --hash=sha256:bfba54a1b93b94f54e1f4fe48395725a3d92fd2a4af702f6bd70946bdc0c6ac2
+ # via nox
+distlib==0.4.0 \
+ --hash=sha256:9659f7d87e46584a30b5780e43ac7a2143098441670ff0a49d5f9034c54a6c16 \
+ --hash=sha256:feec40075be03a04501a973d81f633735b4b69f98b05450592310c0f401a4e0d
+ # via virtualenv
+filelock==3.19.1 \
+ --hash=sha256:66eda1888b0171c998b35be2bcc0f6d75c388a7ce20c3f3f37aa8e96c2dddf58 \
+ --hash=sha256:d38e30481def20772f5baf097c122c3babc4fcdb7e14e57049eb9d88c6dc017d
+ # via virtualenv
+nox==2025.5.1 \
+ --hash=sha256:2a571dfa7a58acc726521ac3cd8184455ebcdcbf26401c7b737b5bc6701427b2 \
+ --hash=sha256:56abd55cf37ff523c254fcec4d152ed51e5fe80e2ab8317221d8b828ac970a31
+ # via -r requirements.in
+packaging==25.0 \
+ --hash=sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484 \
+ --hash=sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f
+ # via nox
+platformdirs==4.4.0 \
+ --hash=sha256:abd01743f24e5287cd7a5db3752faf1a2d65353f38ec26d98e25a6db65958c85 \
+ --hash=sha256:ca753cf4d81dc309bc67b0ea38fd15dc97bc30ce419a7f58d13eb3bf14c4febf
+ # via virtualenv
+tomli==2.2.1 \
+ --hash=sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6 \
+ --hash=sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd \
+ --hash=sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c \
+ --hash=sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b \
+ --hash=sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8 \
+ --hash=sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6 \
+ --hash=sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77 \
+ --hash=sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff \
+ --hash=sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea \
+ --hash=sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192 \
+ --hash=sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249 \
+ --hash=sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee \
+ --hash=sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4 \
+ --hash=sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98 \
+ --hash=sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8 \
+ --hash=sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4 \
+ --hash=sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281 \
+ --hash=sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744 \
+ --hash=sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69 \
+ --hash=sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13 \
+ --hash=sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140 \
+ --hash=sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e \
+ --hash=sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e \
+ --hash=sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc \
+ --hash=sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff \
+ --hash=sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec \
+ --hash=sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2 \
+ --hash=sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222 \
+ --hash=sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106 \
+ --hash=sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272 \
+ --hash=sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a \
+ --hash=sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7
+ # via nox
+virtualenv==20.34.0 \
+ --hash=sha256:341f5afa7eee943e4984a9207c025feedd768baff6753cd660c857ceb3e36026 \
+ --hash=sha256:44815b2c9dee7ed86e387b842a84f20b93f7f417f95886ca1996a72a4138eb1a
+ # via nox
diff --git a/.flake8 b/.flake8
index 29227d4cf4..32986c7928 100644
--- a/.flake8
+++ b/.flake8
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-# Copyright 2020 Google LLC
+# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@
# Generated by synthtool. DO NOT EDIT!
[flake8]
-ignore = E203, E266, E501, W503
+ignore = E203, E231, E266, E501, W503
exclude =
# Exclude generated code.
**/proto/**
diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml
index 9ee60f7e48..508ba98efe 100644
--- a/.github/.OwlBot.lock.yaml
+++ b/.github/.OwlBot.lock.yaml
@@ -1,3 +1,17 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
docker:
- image: gcr.io/repo-automation-bots/owlbot-python:latest
- digest: sha256:aea14a583128771ae8aefa364e1652f3c56070168ef31beb203534222d842b8b
+ image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest
+ digest: sha256:25de45b58e52021d3a24a6273964371a97a4efeefe6ad3845a64e697c63b6447
+# created: 2025-04-14T14:34:43.260858345Z
diff --git a/.github/.OwlBot.yaml b/.github/.OwlBot.yaml
index d60aca5ff1..5db16e2a9d 100644
--- a/.github/.OwlBot.yaml
+++ b/.github/.OwlBot.yaml
@@ -13,7 +13,7 @@
# limitations under the License.
docker:
- image: gcr.io/repo-automation-bots/owlbot-python:latest
+ image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest
deep-remove-regex:
- /owl-bot-staging
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 47eb5c354d..07f48edc31 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -3,9 +3,10 @@
#
# For syntax help see:
# https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax
+# Note: This file is autogenerated. To make changes to the codeowner team, please update .repo-metadata.json.
+# @googleapis/yoshi-python @googleapis/spanner-client-libraries-python are the default owners for changes in this repo
+* @googleapis/yoshi-python @googleapis/spanner-client-libraries-python
-# The api-spanner-python team is the default owner for anything not
-# explicitly taken by someone else.
-* @googleapis/api-spanner-python
-/samples/ @googleapis/api-spanner-python @googleapis/python-samples-owners
\ No newline at end of file
+# @googleapis/python-samples-reviewers @googleapis/spanner-client-libraries-python are the default owners for samples changes
+/samples/ @googleapis/python-samples-reviewers @googleapis/spanner-client-libraries-python
diff --git a/.github/auto-approve.yml b/.github/auto-approve.yml
new file mode 100644
index 0000000000..311ebbb853
--- /dev/null
+++ b/.github/auto-approve.yml
@@ -0,0 +1,3 @@
+# https://github.com/googleapis/repo-automation-bots/tree/main/packages/auto-approve
+processes:
+ - "OwlBotTemplateChanges"
diff --git a/.github/auto-label.yaml b/.github/auto-label.yaml
new file mode 100644
index 0000000000..21786a4eb0
--- /dev/null
+++ b/.github/auto-label.yaml
@@ -0,0 +1,20 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+requestsize:
+ enabled: true
+
+path:
+ pullrequest: true
+ paths:
+ samples: "samples"
diff --git a/.github/blunderbuss.yml b/.github/blunderbuss.yml
index 1dfef96e3d..97a6f7439f 100644
--- a/.github/blunderbuss.yml
+++ b/.github/blunderbuss.yml
@@ -1,2 +1,17 @@
+# Blunderbuss config
+#
+# This file controls who is assigned for pull requests and issues.
+# Note: This file is autogenerated. To make changes to the assignee
+# team, please update `codeowner_team` in `.repo-metadata.json`.
assign_issues:
- - larkee
\ No newline at end of file
+ - googleapis/spanner-client-libraries-python
+
+assign_issues_by:
+ - labels:
+ - "samples"
+ to:
+ - googleapis/python-samples-reviewers
+ - googleapis/spanner-client-libraries-python
+
+assign_prs:
+ - googleapis/spanner-client-libraries-python
diff --git a/.github/release-please.yml b/.github/release-please.yml
index 4507ad0598..dbd2cc9deb 100644
--- a/.github/release-please.yml
+++ b/.github/release-please.yml
@@ -1 +1,15 @@
releaseType: python
+handleGHRelease: true
+manifest: true
+# NOTE: this section is generated by synthtool.languages.python
+# See https://github.com/googleapis/synthtool/blob/master/synthtool/languages/python.py
+branches:
+- branch: v2
+ handleGHRelease: true
+ releaseType: python
+- branch: v1
+ handleGHRelease: true
+ releaseType: python
+- branch: v0
+ handleGHRelease: true
+ releaseType: python
diff --git a/.github/release-trigger.yml b/.github/release-trigger.yml
new file mode 100644
index 0000000000..3c0f1bfc7e
--- /dev/null
+++ b/.github/release-trigger.yml
@@ -0,0 +1,2 @@
+enabled: true
+multiScmName: python-spanner
diff --git a/.github/sync-repo-settings.yaml b/.github/sync-repo-settings.yaml
index 0ddb512dba..d726d1193d 100644
--- a/.github/sync-repo-settings.yaml
+++ b/.github/sync-repo-settings.yaml
@@ -1,15 +1,15 @@
-# https://github.com/googleapis/repo-automation-bots/tree/master/packages/sync-repo-settings
-# Rules for master branch protection
+# https://github.com/googleapis/repo-automation-bots/tree/main/packages/sync-repo-settings
+# Rules for main branch protection
branchProtectionRules:
# Identifies the protection rule pattern. Name of the branch to be protected.
-# Defaults to `master`
-- pattern: master
+# Defaults to `main`
+- pattern: main
requiresCodeOwnerReviews: true
requiresStrictStatusChecks: true
requiredStatusCheckContexts:
- 'Kokoro'
+ - 'Kokoro system-3.12'
- 'cla/google'
- 'Samples - Lint'
- - 'Samples - Python 3.6'
- - 'Samples - Python 3.7'
- - 'Samples - Python 3.8'
+ - 'Samples - Python 3.9'
+ - 'Samples - Python 3.12'
diff --git a/.github/workflows/integration-tests-against-emulator-with-regular-session.yaml b/.github/workflows/integration-tests-against-emulator-with-regular-session.yaml
new file mode 100644
index 0000000000..826a3b7629
--- /dev/null
+++ b/.github/workflows/integration-tests-against-emulator-with-regular-session.yaml
@@ -0,0 +1,35 @@
+on:
+ push:
+ branches:
+ - main
+ pull_request:
+name: Run Spanner integration tests against emulator with regular sessions
+jobs:
+ system-tests:
+ runs-on: ubuntu-latest
+
+ services:
+ emulator:
+ image: gcr.io/cloud-spanner-emulator/emulator:latest
+ ports:
+ - 9010:9010
+ - 9020:9020
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v5
+ - name: Setup Python
+ uses: actions/setup-python@v6
+ with:
+ python-version: 3.13
+ - name: Install nox
+ run: python -m pip install nox
+ - name: Run system tests
+ run: nox -s system
+ env:
+ SPANNER_EMULATOR_HOST: localhost:9010
+ GOOGLE_CLOUD_PROJECT: emulator-test-project
+ GOOGLE_CLOUD_TESTS_CREATE_SPANNER_INSTANCE: true
+ GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS: false
+ GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS: false
+ GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW: false
diff --git a/.github/workflows/integration-tests-against-emulator.yaml b/.github/workflows/integration-tests-against-emulator.yaml
index 803064a38e..e7158307b8 100644
--- a/.github/workflows/integration-tests-against-emulator.yaml
+++ b/.github/workflows/integration-tests-against-emulator.yaml
@@ -1,7 +1,7 @@
on:
push:
branches:
- - master
+ - main
pull_request:
name: Run Spanner integration tests against emulator
jobs:
@@ -10,18 +10,18 @@ jobs:
services:
emulator:
- image: gcr.io/cloud-spanner-emulator/emulator:latest
+ image: gcr.io/cloud-spanner-emulator/emulator
ports:
- 9010:9010
- 9020:9020
steps:
- name: Checkout code
- uses: actions/checkout@v2
+ uses: actions/checkout@v5
- name: Setup Python
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v6
with:
- python-version: 3.8
+ python-version: 3.13
- name: Install nox
run: python -m pip install nox
- name: Run system tests
diff --git a/.github/workflows/mock_server_tests.yaml b/.github/workflows/mock_server_tests.yaml
new file mode 100644
index 0000000000..b705c98191
--- /dev/null
+++ b/.github/workflows/mock_server_tests.yaml
@@ -0,0 +1,21 @@
+on:
+ push:
+ branches:
+ - main
+ pull_request:
+name: Run Spanner tests against an in-mem mock server
+jobs:
+ mock-server-tests:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v5
+ - name: Setup Python
+ uses: actions/setup-python@v6
+ with:
+ python-version: 3.13
+ - name: Install nox
+ run: python -m pip install nox
+ - name: Run mock server tests
+ run: nox -s mockserver
diff --git a/.github/workflows/presubmit.yaml b/.github/workflows/presubmit.yaml
new file mode 100644
index 0000000000..67db6136d1
--- /dev/null
+++ b/.github/workflows/presubmit.yaml
@@ -0,0 +1,42 @@
+on:
+ push:
+ branches:
+ - main
+ pull_request:
+name: Presubmit checks
+permissions:
+ contents: read
+ pull-requests: write
+jobs:
+ lint:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v5
+ - name: Setup Python
+ uses: actions/setup-python@v6
+ with:
+ python-version: 3.13
+ - name: Install nox
+ run: python -m pip install nox
+ - name: Check formatting
+ run: nox -s lint
+ units:
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ python: ["3.9", "3.10", "3.11", "3.12", "3.13"]
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v5
+ - name: Setup Python
+ uses: actions/setup-python@v6
+ with:
+ python-version: ${{matrix.python}}
+ - name: Install nox
+ run: python -m pip install nox
+ - name: Run unit tests
+ run: nox -s unit-${{matrix.python}}
diff --git a/.gitignore b/.gitignore
index b4243ced74..d083ea1ddc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -50,6 +50,7 @@ docs.metadata
# Virtual environment
env/
+venv/
# Test logs
coverage.xml
diff --git a/.kokoro/build.sh b/.kokoro/build.sh
index 2d206c3a1c..6c576c55bf 100755
--- a/.kokoro/build.sh
+++ b/.kokoro/build.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2018 Google LLC
+# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,11 +15,13 @@
set -eo pipefail
+CURRENT_DIR=$(dirname "${BASH_SOURCE[0]}")
+
if [[ -z "${PROJECT_ROOT:-}" ]]; then
- PROJECT_ROOT="github/python-spanner"
+ PROJECT_ROOT=$(realpath "${CURRENT_DIR}/..")
fi
-cd "${PROJECT_ROOT}"
+pushd "${PROJECT_ROOT}"
# Disable buffering, so that the logs stream through.
export PYTHONUNBUFFERED=1
@@ -28,23 +30,22 @@ export PYTHONUNBUFFERED=1
env | grep KOKORO
# Setup service account credentials.
-export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json
-
-# Setup project id.
-export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json")
+if [[ -f "${KOKORO_GFILE_DIR}/service-account.json" ]]
+then
+ export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json
+fi
# Set up creating a new instance for each system test run
export GOOGLE_CLOUD_TESTS_CREATE_SPANNER_INSTANCE=true
-# Remove old nox
-python3 -m pip uninstall --yes --quiet nox-automation
-
-# Install nox
-python3 -m pip install --upgrade --quiet nox
-python3 -m nox --version
+# Setup project id.
+if [[ -f "${KOKORO_GFILE_DIR}/project-id.json" ]]
+then
+ export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json")
+fi
# If this is a continuous build, send the test log to the FlakyBot.
-# See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot.
+# See https://github.com/googleapis/repo-automation-bots/tree/main/packages/flakybot.
if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]]; then
cleanup() {
chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot
@@ -56,7 +57,7 @@ fi
# If NOX_SESSION is set, it only runs the specified session,
# otherwise run all the sessions.
if [[ -n "${NOX_SESSION:-}" ]]; then
- python3 -m nox -s ${NOX_SESSION:-}
+ python3 -m nox -s ${NOX_SESSION:-}
else
- python3 -m nox
+ python3 -m nox
fi
diff --git a/.kokoro/continuous/prerelease-deps.cfg b/.kokoro/continuous/prerelease-deps.cfg
new file mode 100644
index 0000000000..3595fb43f5
--- /dev/null
+++ b/.kokoro/continuous/prerelease-deps.cfg
@@ -0,0 +1,7 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Only run this nox session.
+env_vars: {
+ key: "NOX_SESSION"
+ value: "prerelease_deps"
+}
diff --git a/.kokoro/docker/docs/Dockerfile b/.kokoro/docker/docs/Dockerfile
deleted file mode 100644
index 4e1b1fb8b5..0000000000
--- a/.kokoro/docker/docs/Dockerfile
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ubuntu:20.04
-
-ENV DEBIAN_FRONTEND noninteractive
-
-# Ensure local Python is preferred over distribution Python.
-ENV PATH /usr/local/bin:$PATH
-
-# Install dependencies.
-RUN apt-get update \
- && apt-get install -y --no-install-recommends \
- apt-transport-https \
- build-essential \
- ca-certificates \
- curl \
- dirmngr \
- git \
- gpg-agent \
- graphviz \
- libbz2-dev \
- libdb5.3-dev \
- libexpat1-dev \
- libffi-dev \
- liblzma-dev \
- libreadline-dev \
- libsnappy-dev \
- libssl-dev \
- libsqlite3-dev \
- portaudio19-dev \
- python3-distutils \
- redis-server \
- software-properties-common \
- ssh \
- sudo \
- tcl \
- tcl-dev \
- tk \
- tk-dev \
- uuid-dev \
- wget \
- zlib1g-dev \
- && add-apt-repository universe \
- && apt-get update \
- && apt-get -y install jq \
- && apt-get clean autoclean \
- && apt-get autoremove -y \
- && rm -rf /var/lib/apt/lists/* \
- && rm -f /var/cache/apt/archives/*.deb
-
-RUN wget -O /tmp/get-pip.py 'https://bootstrap.pypa.io/get-pip.py' \
- && python3.8 /tmp/get-pip.py \
- && rm /tmp/get-pip.py
-
-CMD ["python3.8"]
diff --git a/.kokoro/docs/common.cfg b/.kokoro/docs/common.cfg
deleted file mode 100644
index e58f8f473e..0000000000
--- a/.kokoro/docs/common.cfg
+++ /dev/null
@@ -1,65 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
-
-# Build logs will be here
-action {
- define_artifacts {
- regex: "**/*sponge_log.xml"
- }
-}
-
-# Download trampoline resources.
-gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
-
-# Use the trampoline script to run in docker.
-build_file: "python-spanner/.kokoro/trampoline_v2.sh"
-
-# Configure the docker image for kokoro-trampoline.
-env_vars: {
- key: "TRAMPOLINE_IMAGE"
- value: "gcr.io/cloud-devrel-kokoro-resources/python-lib-docs"
-}
-env_vars: {
- key: "TRAMPOLINE_BUILD_FILE"
- value: "github/python-spanner/.kokoro/publish-docs.sh"
-}
-
-env_vars: {
- key: "STAGING_BUCKET"
- value: "docs-staging"
-}
-
-env_vars: {
- key: "V2_STAGING_BUCKET"
- value: "docs-staging-v2"
-}
-
-# It will upload the docker image after successful builds.
-env_vars: {
- key: "TRAMPOLINE_IMAGE_UPLOAD"
- value: "true"
-}
-
-# It will always build the docker image.
-env_vars: {
- key: "TRAMPOLINE_DOCKERFILE"
- value: ".kokoro/docker/docs/Dockerfile"
-}
-
-# Fetch the token needed for reporting release status to GitHub
-before_action {
- fetch_keystore {
- keystore_resource {
- keystore_config_id: 73713
- keyname: "yoshi-automation-github-key"
- }
- }
-}
-
-before_action {
- fetch_keystore {
- keystore_resource {
- keystore_config_id: 73713
- keyname: "docuploader_service_account"
- }
- }
-}
\ No newline at end of file
diff --git a/.kokoro/docs/docs-presubmit.cfg b/.kokoro/docs/docs-presubmit.cfg
deleted file mode 100644
index 505636c275..0000000000
--- a/.kokoro/docs/docs-presubmit.cfg
+++ /dev/null
@@ -1,28 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
-
-env_vars: {
- key: "STAGING_BUCKET"
- value: "gcloud-python-test"
-}
-
-env_vars: {
- key: "V2_STAGING_BUCKET"
- value: "gcloud-python-test"
-}
-
-# We only upload the image in the main `docs` build.
-env_vars: {
- key: "TRAMPOLINE_IMAGE_UPLOAD"
- value: "false"
-}
-
-env_vars: {
- key: "TRAMPOLINE_BUILD_FILE"
- value: "github/python-spanner/.kokoro/build.sh"
-}
-
-# Only run this nox session.
-env_vars: {
- key: "NOX_SESSION"
- value: "docs docfx"
-}
diff --git a/.kokoro/docs/docs.cfg b/.kokoro/docs/docs.cfg
deleted file mode 100644
index 8f43917d92..0000000000
--- a/.kokoro/docs/docs.cfg
+++ /dev/null
@@ -1 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
\ No newline at end of file
diff --git a/.kokoro/populate-secrets.sh b/.kokoro/populate-secrets.sh
index f52514257e..c435402f47 100755
--- a/.kokoro/populate-secrets.sh
+++ b/.kokoro/populate-secrets.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2020 Google LLC.
+# Copyright 2024 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/.kokoro/presubmit/integration-regular-sessions-enabled.cfg b/.kokoro/presubmit/integration-regular-sessions-enabled.cfg
new file mode 100644
index 0000000000..1f646bebf2
--- /dev/null
+++ b/.kokoro/presubmit/integration-regular-sessions-enabled.cfg
@@ -0,0 +1,22 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Only run a subset of all nox sessions
+env_vars: {
+ key: "NOX_SESSION"
+ value: "unit-3.9 unit-3.12 system-3.12"
+}
+
+env_vars: {
+ key: "GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS"
+ value: "false"
+}
+
+env_vars: {
+ key: "GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS"
+ value: "false"
+}
+
+env_vars: {
+ key: "GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW"
+ value: "false"
+}
\ No newline at end of file
diff --git a/.kokoro/presubmit/prerelease-deps.cfg b/.kokoro/presubmit/prerelease-deps.cfg
new file mode 100644
index 0000000000..3595fb43f5
--- /dev/null
+++ b/.kokoro/presubmit/prerelease-deps.cfg
@@ -0,0 +1,7 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Only run this nox session.
+env_vars: {
+ key: "NOX_SESSION"
+ value: "prerelease_deps"
+}
diff --git a/.kokoro/presubmit/presubmit.cfg b/.kokoro/presubmit/presubmit.cfg
index 8f43917d92..109c14c49a 100644
--- a/.kokoro/presubmit/presubmit.cfg
+++ b/.kokoro/presubmit/presubmit.cfg
@@ -1 +1,7 @@
-# Format: //devtools/kokoro/config/proto/build.proto
\ No newline at end of file
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Only run a subset of all nox sessions
+env_vars: {
+ key: "NOX_SESSION"
+ value: "unit-3.9 unit-3.12 cover docs docfx"
+}
diff --git a/.kokoro/presubmit/spanner_perf_bench.cfg b/.kokoro/presubmit/spanner_perf_bench.cfg
new file mode 100644
index 0000000000..5b4a0a126f
--- /dev/null
+++ b/.kokoro/presubmit/spanner_perf_bench.cfg
@@ -0,0 +1,8 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Disable system tests.
+env_vars: {
+ key: "RUN_SYSTEM_TESTS"
+ value: "false"
+}
+
diff --git a/.kokoro/presubmit/system-3.12.cfg b/.kokoro/presubmit/system-3.12.cfg
new file mode 100644
index 0000000000..78cdc5e851
--- /dev/null
+++ b/.kokoro/presubmit/system-3.12.cfg
@@ -0,0 +1,7 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Only run this nox session.
+env_vars: {
+ key: "NOX_SESSION"
+ value: "system-3.12"
+}
\ No newline at end of file
diff --git a/.kokoro/publish-docs.sh b/.kokoro/publish-docs.sh
deleted file mode 100755
index 8acb14e802..0000000000
--- a/.kokoro/publish-docs.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/bash
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -eo pipefail
-
-# Disable buffering, so that the logs stream through.
-export PYTHONUNBUFFERED=1
-
-export PATH="${HOME}/.local/bin:${PATH}"
-
-# Install nox
-python3 -m pip install --user --upgrade --quiet nox
-python3 -m nox --version
-
-# build docs
-nox -s docs
-
-python3 -m pip install --user gcp-docuploader
-
-# create metadata
-python3 -m docuploader create-metadata \
- --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \
- --version=$(python3 setup.py --version) \
- --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \
- --distribution-name=$(python3 setup.py --name) \
- --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \
- --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \
- --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json)
-
-cat docs.metadata
-
-# upload docs
-python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}"
-
-
-# docfx yaml files
-nox -s docfx
-
-# create metadata.
-python3 -m docuploader create-metadata \
- --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \
- --version=$(python3 setup.py --version) \
- --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \
- --distribution-name=$(python3 setup.py --name) \
- --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \
- --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \
- --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json)
-
-cat docs.metadata
-
-# upload docs
-python3 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}"
diff --git a/.kokoro/release.sh b/.kokoro/release.sh
deleted file mode 100755
index 6bdc59e4b5..0000000000
--- a/.kokoro/release.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -eo pipefail
-
-# Start the releasetool reporter
-python3 -m pip install gcp-releasetool
-python3 -m releasetool publish-reporter-script > /tmp/publisher-script; source /tmp/publisher-script
-
-# Ensure that we have the latest versions of Twine, Wheel, and Setuptools.
-python3 -m pip install --upgrade twine wheel setuptools
-
-# Disable buffering, so that the logs stream through.
-export PYTHONUNBUFFERED=1
-
-# Move into the package, build the distribution and upload.
-TWINE_PASSWORD=$(cat "${KOKORO_GFILE_DIR}/secret_manager/google-cloud-pypi-token")
-cd github/python-spanner
-python3 setup.py sdist bdist_wheel
-twine upload --username __token__ --password "${TWINE_PASSWORD}" dist/*
diff --git a/.kokoro/release/common.cfg b/.kokoro/release/common.cfg
deleted file mode 100644
index a09b99531d..0000000000
--- a/.kokoro/release/common.cfg
+++ /dev/null
@@ -1,30 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
-
-# Build logs will be here
-action {
- define_artifacts {
- regex: "**/*sponge_log.xml"
- }
-}
-
-# Download trampoline resources.
-gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
-
-# Use the trampoline script to run in docker.
-build_file: "python-spanner/.kokoro/trampoline.sh"
-
-# Configure the docker image for kokoro-trampoline.
-env_vars: {
- key: "TRAMPOLINE_IMAGE"
- value: "gcr.io/cloud-devrel-kokoro-resources/python-multi"
-}
-env_vars: {
- key: "TRAMPOLINE_BUILD_FILE"
- value: "github/python-spanner/.kokoro/release.sh"
-}
-
-# Tokens needed to report release status back to GitHub
-env_vars: {
- key: "SECRET_MANAGER_KEYS"
- value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem,google-cloud-pypi-token"
-}
diff --git a/.kokoro/release/release.cfg b/.kokoro/release/release.cfg
deleted file mode 100644
index 8f43917d92..0000000000
--- a/.kokoro/release/release.cfg
+++ /dev/null
@@ -1 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
\ No newline at end of file
diff --git a/.kokoro/samples/lint/common.cfg b/.kokoro/samples/lint/common.cfg
index 28beef0844..5a5cd9700a 100644
--- a/.kokoro/samples/lint/common.cfg
+++ b/.kokoro/samples/lint/common.cfg
@@ -31,4 +31,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
# Use the trampoline script to run in docker.
-build_file: "python-spanner/.kokoro/trampoline.sh"
\ No newline at end of file
+build_file: "python-spanner/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.8/common.cfg b/.kokoro/samples/python3.10/common.cfg
similarity index 88%
rename from .kokoro/samples/python3.8/common.cfg
rename to .kokoro/samples/python3.10/common.cfg
index 58713430dd..6aae8b71f9 100644
--- a/.kokoro/samples/python3.8/common.cfg
+++ b/.kokoro/samples/python3.10/common.cfg
@@ -10,13 +10,13 @@ action {
# Specify which tests to run
env_vars: {
key: "RUN_TESTS_SESSION"
- value: "py-3.8"
+ value: "py-3.10"
}
# Declare build specific Cloud project.
env_vars: {
key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
- value: "python-docs-samples-tests-py38"
+ value: "python-docs-samples-tests-310"
}
env_vars: {
@@ -37,4 +37,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
# Use the trampoline script to run in docker.
-build_file: "python-spanner/.kokoro/trampoline.sh"
\ No newline at end of file
+build_file: "python-spanner/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.7/continuous.cfg b/.kokoro/samples/python3.10/continuous.cfg
similarity index 100%
rename from .kokoro/samples/python3.7/continuous.cfg
rename to .kokoro/samples/python3.10/continuous.cfg
diff --git a/.kokoro/samples/python3.6/periodic-head.cfg b/.kokoro/samples/python3.10/periodic-head.cfg
similarity index 100%
rename from .kokoro/samples/python3.6/periodic-head.cfg
rename to .kokoro/samples/python3.10/periodic-head.cfg
diff --git a/.kokoro/samples/python3.6/periodic.cfg b/.kokoro/samples/python3.10/periodic.cfg
similarity index 98%
rename from .kokoro/samples/python3.6/periodic.cfg
rename to .kokoro/samples/python3.10/periodic.cfg
index 50fec96497..71cd1e597e 100644
--- a/.kokoro/samples/python3.6/periodic.cfg
+++ b/.kokoro/samples/python3.10/periodic.cfg
@@ -3,4 +3,4 @@
env_vars: {
key: "INSTALL_LIBRARY_FROM_SOURCE"
value: "False"
-}
\ No newline at end of file
+}
diff --git a/.kokoro/samples/python3.6/presubmit.cfg b/.kokoro/samples/python3.10/presubmit.cfg
similarity index 100%
rename from .kokoro/samples/python3.6/presubmit.cfg
rename to .kokoro/samples/python3.10/presubmit.cfg
diff --git a/.kokoro/samples/python3.7/common.cfg b/.kokoro/samples/python3.11/common.cfg
similarity index 88%
rename from .kokoro/samples/python3.7/common.cfg
rename to .kokoro/samples/python3.11/common.cfg
index 07195c4c5e..fb30c1b856 100644
--- a/.kokoro/samples/python3.7/common.cfg
+++ b/.kokoro/samples/python3.11/common.cfg
@@ -10,13 +10,13 @@ action {
# Specify which tests to run
env_vars: {
key: "RUN_TESTS_SESSION"
- value: "py-3.7"
+ value: "py-3.11"
}
# Declare build specific Cloud project.
env_vars: {
key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
- value: "python-docs-samples-tests-py37"
+ value: "python-docs-samples-tests-311"
}
env_vars: {
@@ -37,4 +37,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
# Use the trampoline script to run in docker.
-build_file: "python-spanner/.kokoro/trampoline.sh"
\ No newline at end of file
+build_file: "python-spanner/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.8/continuous.cfg b/.kokoro/samples/python3.11/continuous.cfg
similarity index 100%
rename from .kokoro/samples/python3.8/continuous.cfg
rename to .kokoro/samples/python3.11/continuous.cfg
diff --git a/.kokoro/samples/python3.7/periodic-head.cfg b/.kokoro/samples/python3.11/periodic-head.cfg
similarity index 100%
rename from .kokoro/samples/python3.7/periodic-head.cfg
rename to .kokoro/samples/python3.11/periodic-head.cfg
diff --git a/.kokoro/samples/python3.7/periodic.cfg b/.kokoro/samples/python3.11/periodic.cfg
similarity index 98%
rename from .kokoro/samples/python3.7/periodic.cfg
rename to .kokoro/samples/python3.11/periodic.cfg
index 50fec96497..71cd1e597e 100644
--- a/.kokoro/samples/python3.7/periodic.cfg
+++ b/.kokoro/samples/python3.11/periodic.cfg
@@ -3,4 +3,4 @@
env_vars: {
key: "INSTALL_LIBRARY_FROM_SOURCE"
value: "False"
-}
\ No newline at end of file
+}
diff --git a/.kokoro/samples/python3.7/presubmit.cfg b/.kokoro/samples/python3.11/presubmit.cfg
similarity index 100%
rename from .kokoro/samples/python3.7/presubmit.cfg
rename to .kokoro/samples/python3.11/presubmit.cfg
diff --git a/.kokoro/samples/python3.6/common.cfg b/.kokoro/samples/python3.12/common.cfg
similarity index 88%
rename from .kokoro/samples/python3.6/common.cfg
rename to .kokoro/samples/python3.12/common.cfg
index 58b15c2849..4571a6d12d 100644
--- a/.kokoro/samples/python3.6/common.cfg
+++ b/.kokoro/samples/python3.12/common.cfg
@@ -10,13 +10,13 @@ action {
# Specify which tests to run
env_vars: {
key: "RUN_TESTS_SESSION"
- value: "py-3.6"
+ value: "py-3.12"
}
# Declare build specific Cloud project.
env_vars: {
key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
- value: "python-docs-samples-tests-py36"
+ value: "python-docs-samples-tests-312"
}
env_vars: {
@@ -37,4 +37,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
# Use the trampoline script to run in docker.
-build_file: "python-spanner/.kokoro/trampoline.sh"
\ No newline at end of file
+build_file: "python-spanner/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.8/presubmit.cfg b/.kokoro/samples/python3.12/continuous.cfg
similarity index 100%
rename from .kokoro/samples/python3.8/presubmit.cfg
rename to .kokoro/samples/python3.12/continuous.cfg
diff --git a/.kokoro/samples/python3.8/periodic-head.cfg b/.kokoro/samples/python3.12/periodic-head.cfg
similarity index 100%
rename from .kokoro/samples/python3.8/periodic-head.cfg
rename to .kokoro/samples/python3.12/periodic-head.cfg
diff --git a/.kokoro/samples/python3.8/periodic.cfg b/.kokoro/samples/python3.12/periodic.cfg
similarity index 98%
rename from .kokoro/samples/python3.8/periodic.cfg
rename to .kokoro/samples/python3.12/periodic.cfg
index 50fec96497..71cd1e597e 100644
--- a/.kokoro/samples/python3.8/periodic.cfg
+++ b/.kokoro/samples/python3.12/periodic.cfg
@@ -3,4 +3,4 @@
env_vars: {
key: "INSTALL_LIBRARY_FROM_SOURCE"
value: "False"
-}
\ No newline at end of file
+}
diff --git a/.kokoro/samples/python3.12/presubmit.cfg b/.kokoro/samples/python3.12/presubmit.cfg
new file mode 100644
index 0000000000..a1c8d9759c
--- /dev/null
+++ b/.kokoro/samples/python3.12/presubmit.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.13/common.cfg b/.kokoro/samples/python3.13/common.cfg
new file mode 100644
index 0000000000..53d26c62af
--- /dev/null
+++ b/.kokoro/samples/python3.13/common.cfg
@@ -0,0 +1,40 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Specify which tests to run
+env_vars: {
+ key: "RUN_TESTS_SESSION"
+ value: "py-3.13"
+}
+
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-313"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-spanner/.kokoro/test-samples.sh"
+}
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker"
+}
+
+# Download secrets for samples
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "python-spanner/.kokoro/trampoline_v2.sh"
diff --git a/.kokoro/samples/python3.6/continuous.cfg b/.kokoro/samples/python3.13/continuous.cfg
similarity index 97%
rename from .kokoro/samples/python3.6/continuous.cfg
rename to .kokoro/samples/python3.13/continuous.cfg
index 7218af1499..a1c8d9759c 100644
--- a/.kokoro/samples/python3.6/continuous.cfg
+++ b/.kokoro/samples/python3.13/continuous.cfg
@@ -3,5 +3,4 @@
env_vars: {
key: "INSTALL_LIBRARY_FROM_SOURCE"
value: "True"
-}
-
+}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.13/periodic-head.cfg b/.kokoro/samples/python3.13/periodic-head.cfg
new file mode 100644
index 0000000000..b6133a1180
--- /dev/null
+++ b/.kokoro/samples/python3.13/periodic-head.cfg
@@ -0,0 +1,11 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-spanner/.kokoro/test-samples-against-head.sh"
+}
diff --git a/.kokoro/samples/python3.13/periodic.cfg b/.kokoro/samples/python3.13/periodic.cfg
new file mode 100644
index 0000000000..71cd1e597e
--- /dev/null
+++ b/.kokoro/samples/python3.13/periodic.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "False"
+}
diff --git a/.kokoro/samples/python3.13/presubmit.cfg b/.kokoro/samples/python3.13/presubmit.cfg
new file mode 100644
index 0000000000..a1c8d9759c
--- /dev/null
+++ b/.kokoro/samples/python3.13/presubmit.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.9/common.cfg b/.kokoro/samples/python3.9/common.cfg
index a62ce6bdd2..46182a2f57 100644
--- a/.kokoro/samples/python3.9/common.cfg
+++ b/.kokoro/samples/python3.9/common.cfg
@@ -37,4 +37,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
# Use the trampoline script to run in docker.
-build_file: "python-spanner/.kokoro/trampoline.sh"
\ No newline at end of file
+build_file: "python-spanner/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.9/periodic.cfg b/.kokoro/samples/python3.9/periodic.cfg
index 50fec96497..71cd1e597e 100644
--- a/.kokoro/samples/python3.9/periodic.cfg
+++ b/.kokoro/samples/python3.9/periodic.cfg
@@ -3,4 +3,4 @@
env_vars: {
key: "INSTALL_LIBRARY_FROM_SOURCE"
value: "False"
-}
\ No newline at end of file
+}
diff --git a/.kokoro/test-samples-against-head.sh b/.kokoro/test-samples-against-head.sh
index 4398b30ba4..e9d8bd79a6 100755
--- a/.kokoro/test-samples-against-head.sh
+++ b/.kokoro/test-samples-against-head.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2020 Google LLC
+# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,6 +23,4 @@ set -eo pipefail
# Enables `**` to include files nested inside sub-folders
shopt -s globstar
-cd github/python-spanner
-
exec .kokoro/test-samples-impl.sh
diff --git a/.kokoro/test-samples-impl.sh b/.kokoro/test-samples-impl.sh
index 311a8d54b9..776365a831 100755
--- a/.kokoro/test-samples-impl.sh
+++ b/.kokoro/test-samples-impl.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2021 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,6 +20,8 @@ set -eo pipefail
# Enables `**` to include files nested inside sub-folders
shopt -s globstar
+DIFF_FROM="origin/main..."
+
# Exit early if samples don't exist
if ! find samples -name 'requirements.txt' | grep -q .; then
echo "No tests run. './samples/**/requirements.txt' not found"
@@ -33,7 +35,7 @@ export PYTHONUNBUFFERED=1
env | grep KOKORO
# Install nox
-python3.6 -m pip install --upgrade --quiet nox
+python3.9 -m pip install --upgrade --quiet nox
# Use secrets acessor service account to get secrets
if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then
@@ -71,16 +73,26 @@ for file in samples/**/requirements.txt; do
file=$(dirname "$file")
cd "$file"
+ # If $DIFF_FROM is set, use it to check for changes in this directory.
+ if [[ -n "${DIFF_FROM:-}" ]]; then
+ git diff --quiet "$DIFF_FROM" .
+ CHANGED=$?
+ if [[ "$CHANGED" -eq 0 ]]; then
+ # echo -e "\n Skipping $file: no changes in folder.\n"
+ continue
+ fi
+ fi
+
echo "------------------------------------------------------------"
echo "- testing $file"
echo "------------------------------------------------------------"
# Use nox to execute the tests for the project.
- python3.6 -m nox -s "$RUN_TESTS_SESSION"
+ python3.9 -m nox -s "$RUN_TESTS_SESSION"
EXIT=$?
# If this is a periodic build, send the test log to the FlakyBot.
- # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot.
+ # See https://github.com/googleapis/repo-automation-bots/tree/main/packages/flakybot.
if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then
chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot
$KOKORO_GFILE_DIR/linux_amd64/flakybot
diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh
index 19e3d5f529..7933d82014 100755
--- a/.kokoro/test-samples.sh
+++ b/.kokoro/test-samples.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2020 Google LLC
+# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -24,8 +24,6 @@ set -eo pipefail
# Enables `**` to include files nested inside sub-folders
shopt -s globstar
-cd github/python-spanner
-
# Run periodic samples tests at latest release
if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then
# preserving the test runner implementation.
diff --git a/.kokoro/trampoline.sh b/.kokoro/trampoline.sh
index f39236e943..48f7969970 100755
--- a/.kokoro/trampoline.sh
+++ b/.kokoro/trampoline.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2017 Google Inc.
+# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/.kokoro/trampoline_v2.sh b/.kokoro/trampoline_v2.sh
index 4af6cdc26d..35fa529231 100755
--- a/.kokoro/trampoline_v2.sh
+++ b/.kokoro/trampoline_v2.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-# Copyright 2020 Google LLC
+# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 62eb5a77d9..1d74695f70 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,4 +1,4 @@
-# Copyright 2021 Google LLC
+# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,10 +22,10 @@ repos:
- id: end-of-file-fixer
- id: check-yaml
- repo: https://github.com/psf/black
- rev: 19.10b0
+ rev: 23.7.0
hooks:
- id: black
-- repo: https://gitlab.com/pycqa/flake8
- rev: 3.9.2
+- repo: https://github.com/pycqa/flake8
+ rev: 6.1.0
hooks:
- id: flake8
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
new file mode 100644
index 0000000000..63ab47b126
--- /dev/null
+++ b/.release-please-manifest.json
@@ -0,0 +1,3 @@
+{
+ ".": "3.58.0"
+}
diff --git a/.repo-metadata.json b/.repo-metadata.json
index 950a765d11..9569af6e31 100644
--- a/.repo-metadata.json
+++ b/.repo-metadata.json
@@ -1,14 +1,18 @@
{
- "name": "spanner",
- "name_pretty": "Cloud Spanner",
- "product_documentation": "https://cloud.google.com/spanner/docs/",
- "client_documentation": "https://googleapis.dev/python/spanner/latest",
- "issue_tracker": "https://issuetracker.google.com/issues?q=componentid:190851%2B%20status:open",
- "release_level": "ga",
- "language": "python",
- "library_type": "GAPIC_COMBO",
- "repo": "googleapis/python-spanner",
- "distribution_name": "google-cloud-spanner",
- "api_id": "spanner.googleapis.com",
- "requires_billing": true
-}
\ No newline at end of file
+ "name": "spanner",
+ "name_pretty": "Cloud Spanner",
+ "product_documentation": "https://cloud.google.com/spanner/docs/",
+ "client_documentation": "https://cloud.google.com/python/docs/reference/spanner/latest",
+ "issue_tracker": "https://issuetracker.google.com/issues?q=componentid:190851%2B%20status:open",
+ "release_level": "stable",
+ "language": "python",
+ "library_type": "GAPIC_COMBO",
+ "repo": "googleapis/python-spanner",
+ "distribution_name": "google-cloud-spanner",
+ "api_id": "spanner.googleapis.com",
+ "requires_billing": true,
+ "default_version": "v1",
+ "codeowner_team": "@googleapis/spanner-client-libraries-python",
+ "api_shortname": "spanner",
+ "api_description": "is a fully managed, mission-critical, \nrelational database service that offers transactional consistency at global scale, \nschemas, SQL (ANSI 2011 with extensions), and automatic, synchronous replication \nfor high availability.\n\nBe sure to activate the Cloud Spanner API on the Developer's Console to\nuse Cloud Spanner from your project."
+}
diff --git a/.trampolinerc b/.trampolinerc
index 383b6ec89f..0080152373 100644
--- a/.trampolinerc
+++ b/.trampolinerc
@@ -1,4 +1,4 @@
-# Copyright 2020 Google LLC
+# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,19 +12,28 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Template for .trampolinerc
-
# Add required env vars here.
required_envvars+=(
- "STAGING_BUCKET"
- "V2_STAGING_BUCKET"
)
# Add env vars which are passed down into the container here.
pass_down_envvars+=(
+ "NOX_SESSION"
+ ###############
+ # Docs builds
+ ###############
"STAGING_BUCKET"
"V2_STAGING_BUCKET"
- "NOX_SESSION"
+ ##################
+ # Samples builds
+ ##################
+ "INSTALL_LIBRARY_FROM_SOURCE"
+ "RUN_TESTS_SESSION"
+ "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ # Target directories.
+ "RUN_TESTS_DIRS"
+ # The nox session to run.
+ "RUN_TESTS_SESSION"
)
# Prevent unintentional override on the default image.
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4d7cda8919..2c2f33e74f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,871 @@
[1]: https://pypi.org/project/google-cloud-spanner/#history
+## [3.58.0](https://github.com/googleapis/python-spanner/compare/v3.57.0...v3.58.0) (2025-09-10)
+
+
+### Features
+
+* **spanner:** Support setting read lock mode ([#1404](https://github.com/googleapis/python-spanner/issues/1404)) ([ee24c6e](https://github.com/googleapis/python-spanner/commit/ee24c6ee2643bc74d52e9f0a924b80a830fa2697))
+
+
+### Dependencies
+
+* Remove Python 3.7 and 3.8 as supported runtimes ([#1395](https://github.com/googleapis/python-spanner/issues/1395)) ([fc93792](https://github.com/googleapis/python-spanner/commit/fc9379232224f56d29d2e36559a756c05a5478ff))
+
+## [3.57.0](https://github.com/googleapis/python-spanner/compare/v3.56.0...v3.57.0) (2025-08-14)
+
+
+### Features
+
+* Support configuring logger in dbapi kwargs ([#1400](https://github.com/googleapis/python-spanner/issues/1400)) ([ffa5c9e](https://github.com/googleapis/python-spanner/commit/ffa5c9e627583ab0635dcaa5512b6e034d811d86))
+
+## [3.56.0](https://github.com/googleapis/python-spanner/compare/v3.55.0...v3.56.0) (2025-07-24)
+
+
+### Features
+
+* Add support for multiplexed sessions - read/write ([#1389](https://github.com/googleapis/python-spanner/issues/1389)) ([ce3f230](https://github.com/googleapis/python-spanner/commit/ce3f2305cd5589e904daa18142fbfeb180f3656a))
+* Add support for multiplexed sessions ([#1383](https://github.com/googleapis/python-spanner/issues/1383)) ([21f5028](https://github.com/googleapis/python-spanner/commit/21f5028c3fdf8b8632c1564efbd973b96711d03b))
+* Default enable multiplex session for all operations unless explicitly set to false ([#1394](https://github.com/googleapis/python-spanner/issues/1394)) ([651ca9c](https://github.com/googleapis/python-spanner/commit/651ca9cd65c713ac59a7d8f55b52b9df5b4b6923))
+* **spanner:** Add new change_stream.proto ([#1382](https://github.com/googleapis/python-spanner/issues/1382)) ([ca6255e](https://github.com/googleapis/python-spanner/commit/ca6255e075944d863ab4be31a681fc7c27817e34))
+
+
+### Performance Improvements
+
+* Skip gRPC trailers for StreamingRead & ExecuteStreamingSql ([#1385](https://github.com/googleapis/python-spanner/issues/1385)) ([cb25de4](https://github.com/googleapis/python-spanner/commit/cb25de40b86baf83d0fb1b8ca015f798671319ee))
+
+## [3.55.0](https://github.com/googleapis/python-spanner/compare/v3.54.0...v3.55.0) (2025-05-28)
+
+
+### Features
+
+* Add a `last` field in the `PartialResultSet` ([d532d57](https://github.com/googleapis/python-spanner/commit/d532d57fd5908ecd7bc9dfff73695715cc4b1ebe))
+* Add support for multiplexed sessions ([#1381](https://github.com/googleapis/python-spanner/issues/1381)) ([97d7268](https://github.com/googleapis/python-spanner/commit/97d7268ac12a57d9d116ee3d9475580e1e7e07ae))
+* Add throughput_mode to UpdateDatabaseDdlRequest to be used by Spanner Migration Tool. See https://github.com/GoogleCloudPlatform/spanner-migration-tool ([d532d57](https://github.com/googleapis/python-spanner/commit/d532d57fd5908ecd7bc9dfff73695715cc4b1ebe))
+* Support fine-grained permissions database roles in connect ([#1338](https://github.com/googleapis/python-spanner/issues/1338)) ([064d9dc](https://github.com/googleapis/python-spanner/commit/064d9dc3441a617cbc80af6e16493bc42c89b3c9))
+
+
+### Bug Fixes
+
+* E2E tracing metadata append issue ([#1357](https://github.com/googleapis/python-spanner/issues/1357)) ([3943885](https://github.com/googleapis/python-spanner/commit/394388595a312f60b423dfbfd7aaf2724cc4454f))
+* Pass through kwargs in dbapi connect ([#1368](https://github.com/googleapis/python-spanner/issues/1368)) ([aae8d61](https://github.com/googleapis/python-spanner/commit/aae8d6161580c88354d813fe75a297c318f1c2c7))
+* Remove setup.cfg configuration for creating universal wheels ([#1324](https://github.com/googleapis/python-spanner/issues/1324)) ([e064474](https://github.com/googleapis/python-spanner/commit/e0644744d7f3fcea42b461996fc0ee22d4218599))
+
+
+### Documentation
+
+* A comment for field `chunked_value` in message `.google.spanner.v1.PartialResultSet` is changed ([d532d57](https://github.com/googleapis/python-spanner/commit/d532d57fd5908ecd7bc9dfff73695715cc4b1ebe))
+* A comment for field `precommit_token` in message `.google.spanner.v1.PartialResultSet` is changed ([d532d57](https://github.com/googleapis/python-spanner/commit/d532d57fd5908ecd7bc9dfff73695715cc4b1ebe))
+* A comment for field `precommit_token` in message `.google.spanner.v1.ResultSet` is changed ([d532d57](https://github.com/googleapis/python-spanner/commit/d532d57fd5908ecd7bc9dfff73695715cc4b1ebe))
+* A comment for field `query_plan` in message `.google.spanner.v1.ResultSetStats` is changed ([d532d57](https://github.com/googleapis/python-spanner/commit/d532d57fd5908ecd7bc9dfff73695715cc4b1ebe))
+* A comment for field `row_count_lower_bound` in message `.google.spanner.v1.ResultSetStats` is changed ([d532d57](https://github.com/googleapis/python-spanner/commit/d532d57fd5908ecd7bc9dfff73695715cc4b1ebe))
+* A comment for field `row_type` in message `.google.spanner.v1.ResultSetMetadata` is changed ([d532d57](https://github.com/googleapis/python-spanner/commit/d532d57fd5908ecd7bc9dfff73695715cc4b1ebe))
+* A comment for field `rows` in message `.google.spanner.v1.ResultSet` is changed ([d532d57](https://github.com/googleapis/python-spanner/commit/d532d57fd5908ecd7bc9dfff73695715cc4b1ebe))
+* A comment for field `stats` in message `.google.spanner.v1.PartialResultSet` is changed ([d532d57](https://github.com/googleapis/python-spanner/commit/d532d57fd5908ecd7bc9dfff73695715cc4b1ebe))
+* A comment for field `stats` in message `.google.spanner.v1.ResultSet` is changed ([d532d57](https://github.com/googleapis/python-spanner/commit/d532d57fd5908ecd7bc9dfff73695715cc4b1ebe))
+* A comment for field `values` in message `.google.spanner.v1.PartialResultSet` is changed ([d532d57](https://github.com/googleapis/python-spanner/commit/d532d57fd5908ecd7bc9dfff73695715cc4b1ebe))
+* A comment for message `ResultSetMetadata` is changed ([d532d57](https://github.com/googleapis/python-spanner/commit/d532d57fd5908ecd7bc9dfff73695715cc4b1ebe))
+* A comment for message `ResultSetStats` is changed ([d532d57](https://github.com/googleapis/python-spanner/commit/d532d57fd5908ecd7bc9dfff73695715cc4b1ebe))
+* Fix markdown formatting in transactions page ([#1377](https://github.com/googleapis/python-spanner/issues/1377)) ([de322f8](https://github.com/googleapis/python-spanner/commit/de322f89642a3c13b6b1d4b9b1a2cdf4c8f550fb))
+
+## [3.54.0](https://github.com/googleapis/python-spanner/compare/v3.53.0...v3.54.0) (2025-04-28)
+
+
+### Features
+
+* Add interval type support ([#1340](https://github.com/googleapis/python-spanner/issues/1340)) ([6ca9b43](https://github.com/googleapis/python-spanner/commit/6ca9b43c3038eca1317c7c9b7e3543b5f1bc68ad))
+* Add sample for pre-split feature ([#1333](https://github.com/googleapis/python-spanner/issues/1333)) ([ca76108](https://github.com/googleapis/python-spanner/commit/ca76108809174e4f3eea38d7ac2463d9b4c73304))
+* Add SQL statement for begin transaction isolation level ([#1331](https://github.com/googleapis/python-spanner/issues/1331)) ([3ac0f91](https://github.com/googleapis/python-spanner/commit/3ac0f9131b38e5cfb2b574d3d73b03736b871712))
+* Support transaction isolation level in dbapi ([#1327](https://github.com/googleapis/python-spanner/issues/1327)) ([03400c4](https://github.com/googleapis/python-spanner/commit/03400c40f1c1cc73e51733f2a28910a8dd78e7d9))
+
+
+### Bug Fixes
+
+* Improve client-side regex statement parser ([#1328](https://github.com/googleapis/python-spanner/issues/1328)) ([b3c259d](https://github.com/googleapis/python-spanner/commit/b3c259deec817812fd8e4940faacf4a927d0d69c))
+
+## [3.53.0](https://github.com/googleapis/python-spanner/compare/v3.52.0...v3.53.0) (2025-03-12)
+
+
+### Features
+
+* Add AddSplitPoints API ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* Add Attempt, Operation and GFE Metrics ([#1302](https://github.com/googleapis/python-spanner/issues/1302)) ([fb21d9a](https://github.com/googleapis/python-spanner/commit/fb21d9acf2545cf7b8e9e21b65eabf21a7bf895f))
+* Add REST Interceptors which support reading metadata ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* Add support for opt-in debug logging ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* Add support for reading selective GAPIC generation methods from service YAML ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* Add the last statement option to ExecuteSqlRequest and ExecuteBatchDmlRequest ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* Add UUID in Spanner TypeCode enum ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* End to end tracing ([#1315](https://github.com/googleapis/python-spanner/issues/1315)) ([aa5d0e6](https://github.com/googleapis/python-spanner/commit/aa5d0e6c1d3e5b0e4b0578e80c21e7c523c30fb5))
+* Exposing FreeInstanceAvailability in InstanceConfig ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* Exposing FreeInstanceMetadata in Instance configuration (to define the metadata related to FREE instance type) ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* Exposing InstanceType in Instance configuration (to define PROVISIONED or FREE spanner instance) ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* Exposing QuorumType in InstanceConfig ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* Exposing storage_limit_per_processing_unit in InstanceConfig ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* Snapshot isolation ([#1318](https://github.com/googleapis/python-spanner/issues/1318)) ([992fcae](https://github.com/googleapis/python-spanner/commit/992fcae2d4fd2b47380d159a3416b8d6d6e1c937))
+* **spanner:** A new enum `IsolationLevel` is added ([#1224](https://github.com/googleapis/python-spanner/issues/1224)) ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+
+
+### Bug Fixes
+
+* Allow Protobuf 6.x ([#1320](https://github.com/googleapis/python-spanner/issues/1320)) ([1faab91](https://github.com/googleapis/python-spanner/commit/1faab91790ae3e2179fbab11b69bb02254ab048a))
+* Cleanup after metric integration test ([#1322](https://github.com/googleapis/python-spanner/issues/1322)) ([d7cf8b9](https://github.com/googleapis/python-spanner/commit/d7cf8b968dfc2b98d3b1d7ae8a025da55bec0767))
+* **deps:** Require grpc-google-iam-v1>=0.14.0 ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* Fix typing issue with gRPC metadata when key ends in -bin ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+
+
+### Performance Improvements
+
+* Add option for last_statement ([#1313](https://github.com/googleapis/python-spanner/issues/1313)) ([19ab6ef](https://github.com/googleapis/python-spanner/commit/19ab6ef0d58262ebb19183e700db6cf124f9b3c5))
+
+
+### Documentation
+
+* A comment for enum `DefaultBackupScheduleType` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for enum value `AUTOMATIC` in enum `DefaultBackupScheduleType` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for enum value `GOOGLE_MANAGED` in enum `Type` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for enum value `NONE` in enum `DefaultBackupScheduleType` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for enum value `USER_MANAGED` in enum `Type` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for field `base_config` in message `.google.spanner.admin.instance.v1.InstanceConfig` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for field `default_backup_schedule_type` in message `.google.spanner.admin.instance.v1.Instance` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for field `filter` in message `.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for field `filter` in message `.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for field `instance_config` in message `.google.spanner.admin.instance.v1.CreateInstanceConfigRequest` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for field `instance_partition_deadline` in message `.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for field `location` in message `.google.spanner.admin.instance.v1.ReplicaInfo` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for field `node_count` in message `.google.spanner.admin.instance.v1.Instance` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for field `node_count` in message `.google.spanner.admin.instance.v1.InstancePartition` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for field `operations` in message `.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for field `operations` in message `.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for field `optional_replicas` in message `.google.spanner.admin.instance.v1.InstanceConfig` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for field `parent` in message `.google.spanner.admin.instance.v1.ListInstancePartitionsRequest` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for field `processing_units` in message `.google.spanner.admin.instance.v1.Instance` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for field `processing_units` in message `.google.spanner.admin.instance.v1.InstancePartition` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for field `referencing_backups` in message `.google.spanner.admin.instance.v1.InstancePartition` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for field `replicas` in message `.google.spanner.admin.instance.v1.InstanceConfig` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for field `storage_utilization_percent` in message `.google.spanner.admin.instance.v1.AutoscalingConfig` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for field `unreachable` in message `.google.spanner.admin.instance.v1.ListInstancePartitionsResponse` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for message `CreateInstanceConfigRequest` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for message `DeleteInstanceConfigRequest` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for message `UpdateInstanceConfigRequest` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for method `CreateInstance` in service `InstanceAdmin` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for method `CreateInstanceConfig` in service `InstanceAdmin` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for method `CreateInstancePartition` in service `InstanceAdmin` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for method `ListInstanceConfigOperations` in service `InstanceAdmin` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for method `ListInstanceConfigs` in service `InstanceAdmin` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for method `ListInstancePartitionOperations` in service `InstanceAdmin` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for method `MoveInstance` in service `InstanceAdmin` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for method `UpdateInstance` in service `InstanceAdmin` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for method `UpdateInstanceConfig` in service `InstanceAdmin` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for method `UpdateInstancePartition` in service `InstanceAdmin` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* Fix typo timzeone -> timezone ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+
+## [3.52.0](https://github.com/googleapis/python-spanner/compare/v3.51.0...v3.52.0) (2025-02-19)
+
+
+### Features
+
+* Add additional opentelemetry span events for session pool ([a6811af](https://github.com/googleapis/python-spanner/commit/a6811afefa6739caa20203048635d94f9b85c4c8))
+* Add GCP standard otel attributes for python client ([#1308](https://github.com/googleapis/python-spanner/issues/1308)) ([0839f98](https://github.com/googleapis/python-spanner/commit/0839f982a3e7f5142825d10c440005a39cdb39cb))
+* Add updated span events + trace more methods ([#1259](https://github.com/googleapis/python-spanner/issues/1259)) ([ad69c48](https://github.com/googleapis/python-spanner/commit/ad69c48f01b09cbc5270b9cefde23715d5ac54b6))
+* MetricsTracer implementation ([#1291](https://github.com/googleapis/python-spanner/issues/1291)) ([8fbde6b](https://github.com/googleapis/python-spanner/commit/8fbde6b84d11db12ee4d536f0d5b8064619bdaa9))
+* Support GRAPH and pipe syntax in dbapi ([#1285](https://github.com/googleapis/python-spanner/issues/1285)) ([959bb9c](https://github.com/googleapis/python-spanner/commit/959bb9cda953eead89ffc271cb2a472e7139f81c))
+* Support transaction and request tags in dbapi ([#1262](https://github.com/googleapis/python-spanner/issues/1262)) ([ee9662f](https://github.com/googleapis/python-spanner/commit/ee9662f57dbb730afb08b9b9829e4e19bda5e69a))
+* **x-goog-spanner-request-id:** Introduce AtomicCounter ([#1275](https://github.com/googleapis/python-spanner/issues/1275)) ([f2483e1](https://github.com/googleapis/python-spanner/commit/f2483e11ba94f8bd1e142d1a85347d90104d1a19))
+
+
+### Bug Fixes
+
+* Retry UNAVAILABLE errors for streaming RPCs ([#1278](https://github.com/googleapis/python-spanner/issues/1278)) ([ab31078](https://github.com/googleapis/python-spanner/commit/ab310786baf09033a28c76e843b654e98a21613d)), closes [#1150](https://github.com/googleapis/python-spanner/issues/1150)
+* **tracing:** Ensure nesting of Transaction.begin under commit + fix suggestions from feature review ([#1287](https://github.com/googleapis/python-spanner/issues/1287)) ([d9ee75a](https://github.com/googleapis/python-spanner/commit/d9ee75ac9ecfbf37a95c95a56295bdd79da3006d))
+* **tracing:** Only set span.status=OK if UNSET ([#1248](https://github.com/googleapis/python-spanner/issues/1248)) ([1d393fe](https://github.com/googleapis/python-spanner/commit/1d393fedf3be8b36c91d0f52a5f23cfa5c05f835)), closes [#1246](https://github.com/googleapis/python-spanner/issues/1246)
+* Update retry strategy for mutation calls to handle aborted transactions ([#1279](https://github.com/googleapis/python-spanner/issues/1279)) ([0887eb4](https://github.com/googleapis/python-spanner/commit/0887eb43b6ea8bd9076ca81977d1446011335853))
+
+## [3.51.0](https://github.com/googleapis/python-spanner/compare/v3.50.1...v3.51.0) (2024-12-05)
+
+
+### Features
+
+* Add connection variable for ignoring transaction warnings ([#1249](https://github.com/googleapis/python-spanner/issues/1249)) ([eeb7836](https://github.com/googleapis/python-spanner/commit/eeb7836b6350aa9626dfb733208e6827d38bb9c9))
+* **spanner:** Implement custom tracer_provider injection for opentelemetry traces ([#1229](https://github.com/googleapis/python-spanner/issues/1229)) ([6869ed6](https://github.com/googleapis/python-spanner/commit/6869ed651e41d7a8af046884bc6c792a4177f766))
+* Support float32 parameters in dbapi ([#1245](https://github.com/googleapis/python-spanner/issues/1245)) ([829b799](https://github.com/googleapis/python-spanner/commit/829b799e0c9c6da274bf95c272cda564cfdba928))
+
+
+### Bug Fixes
+
+* Allow setting connection.read_only to same value ([#1247](https://github.com/googleapis/python-spanner/issues/1247)) ([5e8ca94](https://github.com/googleapis/python-spanner/commit/5e8ca949b583fbcf0b92b42696545973aad8c78f))
+* Allow setting staleness to same value in tx ([#1253](https://github.com/googleapis/python-spanner/issues/1253)) ([a214885](https://github.com/googleapis/python-spanner/commit/a214885ed474f3d69875ef580d5f8cbbabe9199a))
+* Dbapi raised AttributeError with [] as arguments ([#1257](https://github.com/googleapis/python-spanner/issues/1257)) ([758bf48](https://github.com/googleapis/python-spanner/commit/758bf4889a7f3346bc8282a3eed47aee43be650c))
+
+
+### Performance Improvements
+
+* Optimize ResultSet decoding ([#1244](https://github.com/googleapis/python-spanner/issues/1244)) ([ccae6e0](https://github.com/googleapis/python-spanner/commit/ccae6e0287ba6cf3c14f15a907b2106b11ef1fdc))
+* Remove repeated GetSession calls for FixedSizePool ([#1252](https://github.com/googleapis/python-spanner/issues/1252)) ([c064815](https://github.com/googleapis/python-spanner/commit/c064815abaaa4b564edd6f0e365a37e7e839080c))
+
+
+### Documentation
+
+* **samples:** Add samples for Cloud Spanner Default Backup Schedules ([#1238](https://github.com/googleapis/python-spanner/issues/1238)) ([054a186](https://github.com/googleapis/python-spanner/commit/054a18658eedc5d4dbecb7508baa3f3d67f5b815))
+
+## [3.50.1](https://github.com/googleapis/python-spanner/compare/v3.50.0...v3.50.1) (2024-11-14)
+
+
+### Bug Fixes
+
+* Json data type for non object values ([#1236](https://github.com/googleapis/python-spanner/issues/1236)) ([0007be3](https://github.com/googleapis/python-spanner/commit/0007be37a65ff0d4b6b5a1c9ee53d884957c4942))
+* **spanner:** Multi_scm issue in python release ([#1230](https://github.com/googleapis/python-spanner/issues/1230)) ([6d64e9f](https://github.com/googleapis/python-spanner/commit/6d64e9f5ccc811600b5b51a27c19e84ad5957e2a))
+
+## [3.50.0](https://github.com/googleapis/python-spanner/compare/v3.49.1...v3.50.0) (2024-11-11)
+
+
+### Features
+
+* **spanner:** Add support for Cloud Spanner Default Backup Schedules ([45d4517](https://github.com/googleapis/python-spanner/commit/45d4517789660a803849b829c8eae8b4ea227599))
+
+
+### Bug Fixes
+
+* Add PROTO in streaming chunks ([#1213](https://github.com/googleapis/python-spanner/issues/1213)) ([43c190b](https://github.com/googleapis/python-spanner/commit/43c190bc694d56e0c57d96dbaa7fc48117f3c971))
+* Pass through route-to-leader option in dbapi ([#1223](https://github.com/googleapis/python-spanner/issues/1223)) ([ec6c204](https://github.com/googleapis/python-spanner/commit/ec6c204f66e5c8419ea25c4b77f18a38a57acf81))
+* Pin `nox` version in `requirements.in` for devcontainer. ([#1215](https://github.com/googleapis/python-spanner/issues/1215)) ([41604fe](https://github.com/googleapis/python-spanner/commit/41604fe297d02f5cc2e5516ba24e0fdcceda8e26))
+
+
+### Documentation
+
+* Allow multiple KMS keys to create CMEK database/backup ([68551c2](https://github.com/googleapis/python-spanner/commit/68551c20cd101045f3d3fe948d04b99388f28c26))
+
+## [3.49.1](https://github.com/googleapis/python-spanner/compare/v3.49.0...v3.49.1) (2024-09-06)
+
+
+### Bug Fixes
+
+* Revert "chore(spanner): Issue[#1143](https://github.com/googleapis/python-spanner/issues/1143) - Update dependency" ([92f05ed](https://github.com/googleapis/python-spanner/commit/92f05ed04e49adfe0ad68bfa52e855baf8b17643))
+
+## [3.49.0](https://github.com/googleapis/python-spanner/compare/v3.48.0...v3.49.0) (2024-08-27)
+
+
+### Features
+
+* Create a few code snippets as examples for using Spanner Graph in Python ([#1186](https://github.com/googleapis/python-spanner/issues/1186)) ([f886ebd](https://github.com/googleapis/python-spanner/commit/f886ebd80a6422c2167cd440a2a646f52701b684))
+* **spanner:** Add resource reference annotation to backup schedules ([#1176](https://github.com/googleapis/python-spanner/issues/1176)) ([b503fc9](https://github.com/googleapis/python-spanner/commit/b503fc95d8abd47869a24f0e824a227a281282d6))
+* **spanner:** Add samples for instance partitions ([#1168](https://github.com/googleapis/python-spanner/issues/1168)) ([55f83dc](https://github.com/googleapis/python-spanner/commit/55f83dc5f776d436b30da6056a9cdcad3971ce39))
+
+
+### Bug Fixes
+
+* JsonObject init when called on JsonObject of list ([#1166](https://github.com/googleapis/python-spanner/issues/1166)) ([c4af6f0](https://github.com/googleapis/python-spanner/commit/c4af6f09a449f293768f70a84e805ffe08c6c2fb))
+
+## [3.48.0](https://github.com/googleapis/python-spanner/compare/v3.47.0...v3.48.0) (2024-07-30)
+
+
+### Features
+
+* Add field lock_hint in spanner.proto ([9609ad9](https://github.com/googleapis/python-spanner/commit/9609ad96d062fbd8fa4d622bfe8da119329facc0))
+* Add field order_by in spanner.proto ([9609ad9](https://github.com/googleapis/python-spanner/commit/9609ad96d062fbd8fa4d622bfe8da119329facc0))
+* Add support for Cloud Spanner Scheduled Backups ([9609ad9](https://github.com/googleapis/python-spanner/commit/9609ad96d062fbd8fa4d622bfe8da119329facc0))
+* **spanner:** Add support for txn changestream exclusion ([#1152](https://github.com/googleapis/python-spanner/issues/1152)) ([00ccb7a](https://github.com/googleapis/python-spanner/commit/00ccb7a5c1f246b5099265058a5e9875e6627024))
+
+
+### Bug Fixes
+
+* Allow protobuf 5.x ([9609ad9](https://github.com/googleapis/python-spanner/commit/9609ad96d062fbd8fa4d622bfe8da119329facc0))
+* **spanner:** Unskip emulator tests for proto ([#1145](https://github.com/googleapis/python-spanner/issues/1145)) ([cb74679](https://github.com/googleapis/python-spanner/commit/cb74679a05960293dd03eb6b74bff0f68a46395c))
+
+## [3.47.0](https://github.com/googleapis/python-spanner/compare/v3.46.0...v3.47.0) (2024-05-22)
+
+
+### Features
+
+* Add support for multi region encryption config ([#1136](https://github.com/googleapis/python-spanner/issues/1136)) ([bc71fe9](https://github.com/googleapis/python-spanner/commit/bc71fe98a5dfb1198a17d0d1a0b14b89f0ae1754))
+* **spanner:** Add support for Proto Columns ([#1084](https://github.com/googleapis/python-spanner/issues/1084)) ([3ca2689](https://github.com/googleapis/python-spanner/commit/3ca2689324406e0bd9a6b872eda4a23999115f0f))
+
+## [3.46.0](https://github.com/googleapis/python-spanner/compare/v3.45.0...v3.46.0) (2024-05-02)
+
+
+### Features
+
+* **spanner:** Adding EXPECTED_FULFILLMENT_PERIOD to the indicate instance creation times (with FULFILLMENT_PERIOD_NORMAL or FULFILLMENT_PERIOD_EXTENDED ENUM) with the extended instance creation time triggered by On-Demand Capacity Feature ([293ecda](https://github.com/googleapis/python-spanner/commit/293ecdad78b51f248f8d5c023bdba3bac998ea5c))
+
+
+### Documentation
+
+* Remove duplicate parameter description ([#1052](https://github.com/googleapis/python-spanner/issues/1052)) ([1164743](https://github.com/googleapis/python-spanner/commit/116474318d42a6f1ea0f9c2f82707e5dde281159))
+
+## [3.45.0](https://github.com/googleapis/python-spanner/compare/v3.44.0...v3.45.0) (2024-04-17)
+
+
+### Features
+
+* Add support for PG.OID in parameterized queries ([#1035](https://github.com/googleapis/python-spanner/issues/1035)) ([ea5efe4](https://github.com/googleapis/python-spanner/commit/ea5efe4d0bc2790b5172e43e1b66fa3997190adf))
+
+
+### Bug Fixes
+
+* Dates before 1000AD should use 4-digit years ([#1132](https://github.com/googleapis/python-spanner/issues/1132)) ([0ef6565](https://github.com/googleapis/python-spanner/commit/0ef65657de631d876636d11756237496b7713e22)), closes [#1131](https://github.com/googleapis/python-spanner/issues/1131)
+
+## [3.44.0](https://github.com/googleapis/python-spanner/compare/v3.43.0...v3.44.0) (2024-03-13)
+
+
+### Features
+
+* Add support of float32 type ([#1113](https://github.com/googleapis/python-spanner/issues/1113)) ([7e0b46a](https://github.com/googleapis/python-spanner/commit/7e0b46aba7c48f7f944c0fca0cb394551b8d60c1))
+* Changes for float32 in dbapi ([#1115](https://github.com/googleapis/python-spanner/issues/1115)) ([c9f4fbf](https://github.com/googleapis/python-spanner/commit/c9f4fbf2a42054ed61916fb544c5aca947a50598))
+
+
+### Bug Fixes
+
+* Correcting name of variable from `table_schema` to `schema_name` ([#1114](https://github.com/googleapis/python-spanner/issues/1114)) ([a92c6d3](https://github.com/googleapis/python-spanner/commit/a92c6d347f2ae84779ec8662280ea894d558a887))
+
+
+### Documentation
+
+* Add sample for managed autoscaler ([#1111](https://github.com/googleapis/python-spanner/issues/1111)) ([e73c671](https://github.com/googleapis/python-spanner/commit/e73c6718b23bf78a8f264419b2ba378f95fa2554))
+
+## [3.43.0](https://github.com/googleapis/python-spanner/compare/v3.42.0...v3.43.0) (2024-03-06)
+
+
+### Features
+
+* Add retry and timeout for batch dml ([#1107](https://github.com/googleapis/python-spanner/issues/1107)) ([4f6340b](https://github.com/googleapis/python-spanner/commit/4f6340b0930bb1b5430209c4a1ff196c42b834d0))
+* Add support for max commit delay ([#1050](https://github.com/googleapis/python-spanner/issues/1050)) ([d5acc26](https://github.com/googleapis/python-spanner/commit/d5acc263d86fcbde7d5f972930255119e2f60e76))
+* Exposing Spanner client in dbapi connection ([#1100](https://github.com/googleapis/python-spanner/issues/1100)) ([9299212](https://github.com/googleapis/python-spanner/commit/9299212fb8aa6ed27ca40367e8d5aaeeba80c675))
+* Include RENAME in DDL regex ([#1075](https://github.com/googleapis/python-spanner/issues/1075)) ([3669303](https://github.com/googleapis/python-spanner/commit/3669303fb50b4207975b380f356227aceaa1189a))
+* Support partitioned dml in dbapi ([#1103](https://github.com/googleapis/python-spanner/issues/1103)) ([3aab0ed](https://github.com/googleapis/python-spanner/commit/3aab0ed5ed3cd078835812dae183a333fe1d3a20))
+* Untyped param ([#1001](https://github.com/googleapis/python-spanner/issues/1001)) ([1750328](https://github.com/googleapis/python-spanner/commit/1750328bbc7f8a1125f8e0c38024ced8e195a1b9))
+
+
+### Documentation
+
+* Samples and tests for admin backup APIs ([#1105](https://github.com/googleapis/python-spanner/issues/1105)) ([5410c32](https://github.com/googleapis/python-spanner/commit/5410c32febbef48d4623d8023a6eb9f07a65c2f5))
+* Samples and tests for admin database APIs ([#1099](https://github.com/googleapis/python-spanner/issues/1099)) ([c25376c](https://github.com/googleapis/python-spanner/commit/c25376c8513af293c9db752ffc1970dbfca1c5b8))
+* Update all public documents to use auto-generated admin clients. ([#1109](https://github.com/googleapis/python-spanner/issues/1109)) ([d683a14](https://github.com/googleapis/python-spanner/commit/d683a14ccc574e49cefd4e2b2f8b6d9bfd3663ec))
+* Use autogenerated methods to get names from admin samples ([#1110](https://github.com/googleapis/python-spanner/issues/1110)) ([3ab74b2](https://github.com/googleapis/python-spanner/commit/3ab74b267b651b430e96712be22088e2859d7e79))
+
+## [3.42.0](https://github.com/googleapis/python-spanner/compare/v3.41.0...v3.42.0) (2024-01-30)
+
+
+### Features
+
+* Add FLOAT32 enum to TypeCode ([5b94dac](https://github.com/googleapis/python-spanner/commit/5b94dac507cebde2025d412da0a82373afdbdaf5))
+* Add max_commit_delay API ([#1078](https://github.com/googleapis/python-spanner/issues/1078)) ([ec87c08](https://github.com/googleapis/python-spanner/commit/ec87c082570259d6e16834326859a73f6ee8286a))
+* Add proto descriptors for proto and enum types in create/update/get database ddl requests ([5b94dac](https://github.com/googleapis/python-spanner/commit/5b94dac507cebde2025d412da0a82373afdbdaf5))
+* Fixing and refactoring transaction retry logic in dbapi. Also adding interceptors support for testing ([#1056](https://github.com/googleapis/python-spanner/issues/1056)) ([6640888](https://github.com/googleapis/python-spanner/commit/6640888b7845b7e273758ed9a6de3044e281f555))
+* Implementation of run partition query ([#1080](https://github.com/googleapis/python-spanner/issues/1080)) ([f3b23b2](https://github.com/googleapis/python-spanner/commit/f3b23b268766b6ff2704da18945a1b607a6c8909))
+
+
+### Bug Fixes
+
+* Few fixes in DBAPI ([#1085](https://github.com/googleapis/python-spanner/issues/1085)) ([1ed5a47](https://github.com/googleapis/python-spanner/commit/1ed5a47ce9cfe7be0805a2961b24d7b682cda2f3))
+* Small fix in description when metadata is not present in cursor's _result_set ([#1088](https://github.com/googleapis/python-spanner/issues/1088)) ([57643e6](https://github.com/googleapis/python-spanner/commit/57643e66a64d9befeb27fbbad360613ff69bd48c))
+* **spanner:** Add SpannerAsyncClient import to spanner_v1 package ([#1086](https://github.com/googleapis/python-spanner/issues/1086)) ([2d98b54](https://github.com/googleapis/python-spanner/commit/2d98b5478ee201d9fbb2775975f836def2817e33))
+
+
+### Documentation
+
+* Samples and tests for auto-generated createDatabase and createInstance APIs. ([#1065](https://github.com/googleapis/python-spanner/issues/1065)) ([16c510e](https://github.com/googleapis/python-spanner/commit/16c510eeed947beb87a134c64ca83a37f90b03fb))
+
+## [3.41.0](https://github.com/googleapis/python-spanner/compare/v3.40.1...v3.41.0) (2024-01-10)
+
+
+### Features
+
+* Add BatchWrite API ([#1011](https://github.com/googleapis/python-spanner/issues/1011)) ([d0e4ffc](https://github.com/googleapis/python-spanner/commit/d0e4ffccea071feaa2ca012a0e3f60a945ed1a13))
+* Add PG.OID type code annotation ([#1023](https://github.com/googleapis/python-spanner/issues/1023)) ([2d59dd0](https://github.com/googleapis/python-spanner/commit/2d59dd09b8f14a37c780d8241a76e2f109ba88b0))
+* Add support for Directed Reads ([#1000](https://github.com/googleapis/python-spanner/issues/1000)) ([c4210b2](https://github.com/googleapis/python-spanner/commit/c4210b28466cfd88fffe546140a005a8e0a1af23))
+* Add support for Python 3.12 ([#1040](https://github.com/googleapis/python-spanner/issues/1040)) ([b28dc9b](https://github.com/googleapis/python-spanner/commit/b28dc9b0f97263d3926043fe5dfcb4cdc75ab35a))
+* Batch Write API implementation and samples ([#1027](https://github.com/googleapis/python-spanner/issues/1027)) ([aa36b07](https://github.com/googleapis/python-spanner/commit/aa36b075ebb13fa952045695a8f4eb6d21ae61ff))
+* Implementation for batch dml in dbapi ([#1055](https://github.com/googleapis/python-spanner/issues/1055)) ([7a92315](https://github.com/googleapis/python-spanner/commit/7a92315c8040dbf6f652974e19cd63abfd6cda2f))
+* Implementation for Begin and Rollback clientside statements ([#1041](https://github.com/googleapis/python-spanner/issues/1041)) ([15623cd](https://github.com/googleapis/python-spanner/commit/15623cda0ac1eb5dd71434c9064134cfa7800a79))
+* Implementation for partitioned query in dbapi ([#1067](https://github.com/googleapis/python-spanner/issues/1067)) ([63daa8a](https://github.com/googleapis/python-spanner/commit/63daa8a682824609b5a21699d95b0f41930635ef))
+* Implementation of client side statements that return ([#1046](https://github.com/googleapis/python-spanner/issues/1046)) ([bb5fa1f](https://github.com/googleapis/python-spanner/commit/bb5fa1fb75dba18965cddeacd77b6af0a05b4697))
+* Implementing client side statements in dbapi (starting with commit) ([#1037](https://github.com/googleapis/python-spanner/issues/1037)) ([eb41b0d](https://github.com/googleapis/python-spanner/commit/eb41b0da7c1e60561b46811d7307e879f071c6ce))
+* Introduce compatibility with native namespace packages ([#1036](https://github.com/googleapis/python-spanner/issues/1036)) ([5d80ab0](https://github.com/googleapis/python-spanner/commit/5d80ab0794216cd093a21989be0883b02eaa437a))
+* Return list of dictionaries for execute streaming sql ([#1003](https://github.com/googleapis/python-spanner/issues/1003)) ([b534a8a](https://github.com/googleapis/python-spanner/commit/b534a8aac116a824544d63a24e38f3d484e0d207))
+* **spanner:** Add autoscaling config to the instance proto ([#1022](https://github.com/googleapis/python-spanner/issues/1022)) ([4d490cf](https://github.com/googleapis/python-spanner/commit/4d490cf9de600b16a90a1420f8773b2ae927983d))
+* **spanner:** Add directed_read_option in spanner.proto ([#1030](https://github.com/googleapis/python-spanner/issues/1030)) ([84d662b](https://github.com/googleapis/python-spanner/commit/84d662b056ca4bd4177b3107ba463302b5362ff9))
+
+
+### Bug Fixes
+
+* Executing existing DDL statements on executemany statement execution ([#1032](https://github.com/googleapis/python-spanner/issues/1032)) ([07fbc45](https://github.com/googleapis/python-spanner/commit/07fbc45156a1b42a5e61c9c4b09923f239729aa8))
+* Fix for flaky test_read_timestamp_client_side_autocommit test ([#1071](https://github.com/googleapis/python-spanner/issues/1071)) ([0406ded](https://github.com/googleapis/python-spanner/commit/0406ded8b0abcdc93a7a2422247a14260f5c620c))
+* Require google-cloud-core >= 1.4.4 ([#1015](https://github.com/googleapis/python-spanner/issues/1015)) ([a2f87b9](https://github.com/googleapis/python-spanner/commit/a2f87b9d9591562877696526634f0c7c4dd822dd))
+* Require proto-plus 1.22.2 for python 3.11 ([#880](https://github.com/googleapis/python-spanner/issues/880)) ([7debe71](https://github.com/googleapis/python-spanner/commit/7debe7194b9f56b14daeebb99f48787174a9471b))
+* Use `retry_async` instead of `retry` in async client ([#1044](https://github.com/googleapis/python-spanner/issues/1044)) ([1253ae4](https://github.com/googleapis/python-spanner/commit/1253ae46011daa3a0b939e22e957dd3ab5179210))
+
+
+### Documentation
+
+* Minor formatting ([498dba2](https://github.com/googleapis/python-spanner/commit/498dba26a7c1a1cb710a92c0167272ff5c0eef27))
+
+## [3.40.1](https://github.com/googleapis/python-spanner/compare/v3.40.0...v3.40.1) (2023-08-17)
+
+
+### Bug Fixes
+
+* Fix to reload table when checking if table exists ([#1002](https://github.com/googleapis/python-spanner/issues/1002)) ([53bda62](https://github.com/googleapis/python-spanner/commit/53bda62c4996d622b7a11e860841c16e4097bded))
+
+## [3.40.0](https://github.com/googleapis/python-spanner/compare/v3.39.0...v3.40.0) (2023-08-04)
+
+
+### Features
+
+* Enable leader aware routing by default. This update contains performance optimisations that will reduce the latency of read/write transactions that originate from a region other than the default leader region. ([e8dbfe7](https://github.com/googleapis/python-spanner/commit/e8dbfe709d72a04038e05166adbad275642f1f22))
+
+## [3.39.0](https://github.com/googleapis/python-spanner/compare/v3.38.0...v3.39.0) (2023-08-02)
+
+
+### Features
+
+* Foreign key on delete cascade action testing and samples ([#910](https://github.com/googleapis/python-spanner/issues/910)) ([681c8ee](https://github.com/googleapis/python-spanner/commit/681c8eead40582addf75e02c159ea1ff9d6de85e))
+
+
+### Documentation
+
+* Minor formatting ([#991](https://github.com/googleapis/python-spanner/issues/991)) ([60efc42](https://github.com/googleapis/python-spanner/commit/60efc426cf26c4863d81743a5545c5f296308815))
+
+## [3.38.0](https://github.com/googleapis/python-spanner/compare/v3.37.0...v3.38.0) (2023-07-21)
+
+
+### Features
+
+* Set LAR as False ([#980](https://github.com/googleapis/python-spanner/issues/980)) ([75e8a59](https://github.com/googleapis/python-spanner/commit/75e8a59ff5d7f15088b9c4ba5961345746e35bcc))
+
+## [3.37.0](https://github.com/googleapis/python-spanner/compare/v3.36.0...v3.37.0) (2023-07-21)
+
+
+### Features
+
+* Enable leader aware routing by default. This update contains performance optimisations that will reduce the latency of read/write transactions that originate from a region other than the default leader region. ([402b101](https://github.com/googleapis/python-spanner/commit/402b1015a58f0982d5e3f9699297db82d3cdd7b2))
+
+
+### Bug Fixes
+
+* Add async context manager return types ([#967](https://github.com/googleapis/python-spanner/issues/967)) ([7e2e712](https://github.com/googleapis/python-spanner/commit/7e2e712f9ee1e8643c5c59dbd1d15b13b3c0f3ea))
+
+
+### Documentation
+
+* Fix documentation structure ([#949](https://github.com/googleapis/python-spanner/issues/949)) ([b73e47b](https://github.com/googleapis/python-spanner/commit/b73e47bb43f5767957685400c7876d6a8b7489a3))
+
+## [3.36.0](https://github.com/googleapis/python-spanner/compare/v3.35.1...v3.36.0) (2023-06-06)
+
+
+### Features
+
+* Add DdlStatementActionInfo and add actions to UpdateDatabaseDdlMetadata ([#948](https://github.com/googleapis/python-spanner/issues/948)) ([1ca6874](https://github.com/googleapis/python-spanner/commit/1ca687464fe65a19370a460556acc0957d693399))
+* Testing for fgac-pg ([#902](https://github.com/googleapis/python-spanner/issues/902)) ([ad1f527](https://github.com/googleapis/python-spanner/commit/ad1f5277dfb3b6a6c7458ff2ace5f724e56360c1))
+
+## [3.35.1](https://github.com/googleapis/python-spanner/compare/v3.35.0...v3.35.1) (2023-05-25)
+
+
+### Bug Fixes
+
+* Catch rst stream error for all transactions ([#934](https://github.com/googleapis/python-spanner/issues/934)) ([d317d2e](https://github.com/googleapis/python-spanner/commit/d317d2e1b882d9cf576bfc6c195fa9df7c518c4e))
+
+## [3.35.0](https://github.com/googleapis/python-spanner/compare/v3.34.0...v3.35.0) (2023-05-16)
+
+
+### Features
+
+* Add support for updateDatabase in Cloud Spanner ([#914](https://github.com/googleapis/python-spanner/issues/914)) ([6c7ad29](https://github.com/googleapis/python-spanner/commit/6c7ad2921d2bf886b538f7e24e86397c188620c8))
+
+## [3.34.0](https://github.com/googleapis/python-spanner/compare/v3.33.0...v3.34.0) (2023-05-16)
+
+
+### Features
+
+* Add support for UpdateDatabase in Cloud Spanner ([#941](https://github.com/googleapis/python-spanner/issues/941)) ([38fb890](https://github.com/googleapis/python-spanner/commit/38fb890e34762f104ca97e612e62d4f59e752133))
+
+
+### Bug Fixes
+
+* Upgrade version of sqlparse ([#943](https://github.com/googleapis/python-spanner/issues/943)) ([df57ce6](https://github.com/googleapis/python-spanner/commit/df57ce6f00b6a992024c9f1bd6948905ae1e5cf4))
+
+## [3.33.0](https://github.com/googleapis/python-spanner/compare/v3.32.0...v3.33.0) (2023-04-27)
+
+
+### Features
+
+* Leader Aware Routing ([#899](https://github.com/googleapis/python-spanner/issues/899)) ([f9fefad](https://github.com/googleapis/python-spanner/commit/f9fefad6ee2e16804d109d8bfbb613062f57ea65))
+
+## [3.32.0](https://github.com/googleapis/python-spanner/compare/v3.31.0...v3.32.0) (2023-04-25)
+
+
+### Features
+
+* Enable instance-level connection ([#931](https://github.com/googleapis/python-spanner/issues/931)) ([d6963e2](https://github.com/googleapis/python-spanner/commit/d6963e2142d880e94c6f3e9eb27ed1ac310bd1d0))
+
+## [3.31.0](https://github.com/googleapis/python-spanner/compare/v3.30.0...v3.31.0) (2023-04-12)
+
+
+### Features
+
+* Add databoost enabled property for batch transactions ([#892](https://github.com/googleapis/python-spanner/issues/892)) ([ffb3915](https://github.com/googleapis/python-spanner/commit/ffb39158be5a551b698739c003ee6125a11c1c7a))
+
+
+### Bug Fixes
+
+* Set databoost false ([#928](https://github.com/googleapis/python-spanner/issues/928)) ([c9ed9d2](https://github.com/googleapis/python-spanner/commit/c9ed9d24d19594dfff57c979fa3bf68d84bbc3b5))
+
+## [3.30.0](https://github.com/googleapis/python-spanner/compare/v3.29.0...v3.30.0) (2023-03-28)
+
+
+### Features
+
+* Pass custom Client object to dbapi ([#911](https://github.com/googleapis/python-spanner/issues/911)) ([52b1a0a](https://github.com/googleapis/python-spanner/commit/52b1a0af0103a5b91aa5bf9ea1138319bdb90d79))
+
+## [3.29.0](https://github.com/googleapis/python-spanner/compare/v3.28.0...v3.29.0) (2023-03-23)
+
+
+### Features
+
+* Adding new fields for Serverless analytics ([#906](https://github.com/googleapis/python-spanner/issues/906)) ([2a5a636](https://github.com/googleapis/python-spanner/commit/2a5a636fc296ad0a7f86ace6a5f361db1e2ee26d))
+
+
+### Bug Fixes
+
+* Correcting the proto field Id for field data_boost_enabled ([#915](https://github.com/googleapis/python-spanner/issues/915)) ([428aa1e](https://github.com/googleapis/python-spanner/commit/428aa1e5e4458649033a5566dc3017d2fadbd2a0))
+
+
+### Documentation
+
+* Fix formatting of request arg in docstring ([#918](https://github.com/googleapis/python-spanner/issues/918)) ([c022bf8](https://github.com/googleapis/python-spanner/commit/c022bf859a3ace60c0a9ddb86896bc83f85e327f))
+
+## [3.28.0](https://github.com/googleapis/python-spanner/compare/v3.27.1...v3.28.0) (2023-02-28)
+
+
+### Features
+
+* Enable "rest" transport in Python for services supporting numeric enums ([#897](https://github.com/googleapis/python-spanner/issues/897)) ([c21a0d5](https://github.com/googleapis/python-spanner/commit/c21a0d5f4600818ca79cd4e199a2245683c33467))
+
+## [3.27.1](https://github.com/googleapis/python-spanner/compare/v3.27.0...v3.27.1) (2023-01-30)
+
+
+### Bug Fixes
+
+* Add context manager return types ([830f325](https://github.com/googleapis/python-spanner/commit/830f325c4ab9ab1eb8d53edca723d000c23ee0d7))
+* Change fgac database role tags ([#888](https://github.com/googleapis/python-spanner/issues/888)) ([ae92f0d](https://github.com/googleapis/python-spanner/commit/ae92f0dd8a78f2397977354525b4be4b2b02aec3))
+* Fix for database name in batch create request ([#883](https://github.com/googleapis/python-spanner/issues/883)) ([5e50beb](https://github.com/googleapis/python-spanner/commit/5e50bebdd1d43994b3d83568641d1dff1c419cc8))
+
+
+### Documentation
+
+* Add documentation for enums ([830f325](https://github.com/googleapis/python-spanner/commit/830f325c4ab9ab1eb8d53edca723d000c23ee0d7))
+
+## [3.27.0](https://github.com/googleapis/python-spanner/compare/v3.26.0...v3.27.0) (2023-01-10)
+
+
+### Features
+
+* Add support for python 3.11 ([#879](https://github.com/googleapis/python-spanner/issues/879)) ([4b8c2cf](https://github.com/googleapis/python-spanner/commit/4b8c2cf6c30892ad977e3db6c3a147a93af649e6))
+* Add typing to proto.Message based class attributes ([4683d10](https://github.com/googleapis/python-spanner/commit/4683d10c75e24aa222591d6001e07aacb6b4ee46))
+
+
+### Bug Fixes
+
+* Add dict typing for client_options ([4683d10](https://github.com/googleapis/python-spanner/commit/4683d10c75e24aa222591d6001e07aacb6b4ee46))
+* **deps:** Require google-api-core >=1.34.0, >=2.11.0 ([4683d10](https://github.com/googleapis/python-spanner/commit/4683d10c75e24aa222591d6001e07aacb6b4ee46))
+* Drop packaging dependency ([4683d10](https://github.com/googleapis/python-spanner/commit/4683d10c75e24aa222591d6001e07aacb6b4ee46))
+* Drop usage of pkg_resources ([4683d10](https://github.com/googleapis/python-spanner/commit/4683d10c75e24aa222591d6001e07aacb6b4ee46))
+* Fix timeout default values ([4683d10](https://github.com/googleapis/python-spanner/commit/4683d10c75e24aa222591d6001e07aacb6b4ee46))
+
+
+### Documentation
+
+* **samples:** Snippetgen handling of repeated enum field ([4683d10](https://github.com/googleapis/python-spanner/commit/4683d10c75e24aa222591d6001e07aacb6b4ee46))
+* **samples:** Snippetgen should call await on the operation coroutine before calling result ([4683d10](https://github.com/googleapis/python-spanner/commit/4683d10c75e24aa222591d6001e07aacb6b4ee46))
+
+## [3.26.0](https://github.com/googleapis/python-spanner/compare/v3.25.0...v3.26.0) (2022-12-15)
+
+
+### Features
+
+* Inline Begin transaction for RW transactions ([#840](https://github.com/googleapis/python-spanner/issues/840)) ([c2456be](https://github.com/googleapis/python-spanner/commit/c2456bed513dc4ab8954e5227605fca12e776b63))
+
+
+### Bug Fixes
+
+* Fix for binding of pinging and bursty pool with database role ([#871](https://github.com/googleapis/python-spanner/issues/871)) ([89da17e](https://github.com/googleapis/python-spanner/commit/89da17efccdf4f686f73f87f997128a96c614839))
+
+## [3.25.0](https://github.com/googleapis/python-spanner/compare/v3.24.0...v3.25.0) (2022-12-13)
+
+
+### Features
+
+* Fgac support and samples ([#867](https://github.com/googleapis/python-spanner/issues/867)) ([24fa244](https://github.com/googleapis/python-spanner/commit/24fa244ceb13263a7c2ce752bf7a4170bcabec6f))
+
+## [3.24.0](https://github.com/googleapis/python-spanner/compare/v3.23.0...v3.24.0) (2022-11-30)
+
+
+### Features
+
+* Add snippets for Spanner DML with returning clause ([#811](https://github.com/googleapis/python-spanner/issues/811)) ([62e55b5](https://github.com/googleapis/python-spanner/commit/62e55b5e98530e53483003a6729e1b69b7ee2d9c))
+* Add support and tests for DML returning clauses ([#805](https://github.com/googleapis/python-spanner/issues/805)) ([81505cd](https://github.com/googleapis/python-spanner/commit/81505cd221d74936c46755e81e9e04fce828f8a2))
+
+## [3.23.0](https://github.com/googleapis/python-spanner/compare/v3.22.1...v3.23.0) (2022-11-07)
+
+
+### Features
+
+* Adding support and samples for jsonb ([#851](https://github.com/googleapis/python-spanner/issues/851)) ([268924d](https://github.com/googleapis/python-spanner/commit/268924d29fa2577103abb9b6cdc91585d7c349ce))
+* Support request priorities ([#834](https://github.com/googleapis/python-spanner/issues/834)) ([ef2159c](https://github.com/googleapis/python-spanner/commit/ef2159c554b866955c9030099b208d4d9d594e83))
+* Support request options in !autocommit mode ([#838](https://github.com/googleapis/python-spanner/issues/838)) ([ab768e4](https://github.com/googleapis/python-spanner/commit/ab768e45efe7334823ec6bcdccfac2a6dde73bd7))
+* Update result_set.proto to return undeclared parameters in ExecuteSql API ([#841](https://github.com/googleapis/python-spanner/issues/841)) ([0aa4cad](https://github.com/googleapis/python-spanner/commit/0aa4cadb1ba8590cdfab5573b869e8b16e8050f8))
+* Update transaction.proto to include different lock modes ([#845](https://github.com/googleapis/python-spanner/issues/845)) ([c191296](https://github.com/googleapis/python-spanner/commit/c191296df5a0322e6050786e59159999eff16cdd))
+
+
+### Bug Fixes
+
+* **deps:** Allow protobuf 3.19.5 ([#839](https://github.com/googleapis/python-spanner/issues/839)) ([06725fc](https://github.com/googleapis/python-spanner/commit/06725fcf7fb216ad0cffb2cb568f8da38243c32e))
+
+
+### Documentation
+
+* Describe DB API and transactions retry mechanism ([#844](https://github.com/googleapis/python-spanner/issues/844)) ([30a0666](https://github.com/googleapis/python-spanner/commit/30a0666decf3ac638568c613facbf999efec6f19)), closes [#791](https://github.com/googleapis/python-spanner/issues/791)
+
+## [3.22.1](https://github.com/googleapis/python-spanner/compare/v3.22.0...v3.22.1) (2022-10-04)
+
+
+### Bug Fixes
+
+* **deps:** Require protobuf >= 3.20.2 ([#830](https://github.com/googleapis/python-spanner/issues/830)) ([4d71563](https://github.com/googleapis/python-spanner/commit/4d7156376f4633de6c1a2bfd25ba97126386ebd0))
+
+
+### Documentation
+
+* **samples:** add samples for CMMR phase 2 ([4282340](https://github.com/googleapis/python-spanner/commit/4282340bc2c3a34496c59c33f5c64ff76dceda4c))
+
+## [3.22.0](https://github.com/googleapis/python-spanner/compare/v3.21.0...v3.22.0) (2022-09-26)
+
+
+### Features
+
+* Adding reason, domain, metadata & error_details fields in Custom Exceptions for additional info ([#804](https://github.com/googleapis/python-spanner/issues/804)) ([2a74060](https://github.com/googleapis/python-spanner/commit/2a740607a00cb622ac9ce4005c12afd52114b4a5))
+
+## [3.21.0](https://github.com/googleapis/python-spanner/compare/v3.20.0...v3.21.0) (2022-09-16)
+
+
+### Features
+
+* Add custom instance config operations ([#810](https://github.com/googleapis/python-spanner/issues/810)) ([f07333f](https://github.com/googleapis/python-spanner/commit/f07333fb7238e79b32f480a8c82c61fc2fb26dee))
+
+## [3.20.0](https://github.com/googleapis/python-spanner/compare/v3.19.0...v3.20.0) (2022-08-30)
+
+
+### Features
+
+* Adds TypeAnnotationCode PG_JSONB ([#792](https://github.com/googleapis/python-spanner/issues/792)) ([6a661d4](https://github.com/googleapis/python-spanner/commit/6a661d4492bcb77abee60095ffc2cfdc06b48124))
+
+
+### Bug Fixes
+
+* if JsonObject serialized to None then return `null_value` instead of `string_value` ([#771](https://github.com/googleapis/python-spanner/issues/771)) ([82170b5](https://github.com/googleapis/python-spanner/commit/82170b521f0da1ba5aaf064ba9ee50c74fe21a86))
+
+## [3.19.0](https://github.com/googleapis/python-spanner/compare/v3.18.0...v3.19.0) (2022-08-17)
+
+
+### Features
+
+* support JSON object consisting of an array. ([#782](https://github.com/googleapis/python-spanner/issues/782)) ([92a3169](https://github.com/googleapis/python-spanner/commit/92a3169b59bae527d77ecc19f798998650ca4192))
+
+## [3.18.0](https://github.com/googleapis/python-spanner/compare/v3.17.0...v3.18.0) (2022-08-12)
+
+
+### Features
+
+* Add ListDatabaseRoles API to support role based access control ([#774](https://github.com/googleapis/python-spanner/issues/774)) ([3867882](https://github.com/googleapis/python-spanner/commit/3867882a14c9a2edeb4a47d5a77ec10b2e8e35da))
+
+
+### Bug Fixes
+
+* **deps:** allow protobuf < 5.0.0 ([eee5f31](https://github.com/googleapis/python-spanner/commit/eee5f31b2fe977d542c711831f4e6d06f743fab4))
+* **deps:** require proto-plus >= 1.22.0 ([eee5f31](https://github.com/googleapis/python-spanner/commit/eee5f31b2fe977d542c711831f4e6d06f743fab4))
+* target new spanner db admin service config ([8c73cb3](https://github.com/googleapis/python-spanner/commit/8c73cb3ff1093996dfd88a2361e7c73cad321fd6))
+
+## [3.17.0](https://github.com/googleapis/python-spanner/compare/v3.16.0...v3.17.0) (2022-07-19)
+
+
+### Features
+
+* add audience parameter ([60db146](https://github.com/googleapis/python-spanner/commit/60db146f71e4f7e28f23e63ae085a56d3b9b20ad))
+* add Session creator role ([60db146](https://github.com/googleapis/python-spanner/commit/60db146f71e4f7e28f23e63ae085a56d3b9b20ad))
+* Adding two new fields for Instance create_time and update_time ([60db146](https://github.com/googleapis/python-spanner/commit/60db146f71e4f7e28f23e63ae085a56d3b9b20ad))
+
+
+### Bug Fixes
+
+* **deps:** require google-api-core>=1.32.0,>=2.8.0 ([#739](https://github.com/googleapis/python-spanner/issues/739)) ([60db146](https://github.com/googleapis/python-spanner/commit/60db146f71e4f7e28f23e63ae085a56d3b9b20ad))
+
+
+### Documentation
+
+* clarify transaction semantics ([60db146](https://github.com/googleapis/python-spanner/commit/60db146f71e4f7e28f23e63ae085a56d3b9b20ad))
+
+## [3.16.0](https://github.com/googleapis/python-spanner/compare/v3.15.1...v3.16.0) (2022-07-11)
+
+
+### Features
+
+* Automated Release Blessing ([#767](https://github.com/googleapis/python-spanner/issues/767)) ([19caf44](https://github.com/googleapis/python-spanner/commit/19caf44489e0af915405466960cf83bea4d3a579))
+* python typing ([#646](https://github.com/googleapis/python-spanner/issues/646)) ([169019f](https://github.com/googleapis/python-spanner/commit/169019f283b4fc1f82be928de8e61477bd7f33ca))
+
+
+### Bug Fixes
+
+* [@421](https://github.com/421) ([#769](https://github.com/googleapis/python-spanner/issues/769)) ([58640a1](https://github.com/googleapis/python-spanner/commit/58640a1e013fb24dde403706ae32c851112128c9))
+* add pause for the staleness test ([#762](https://github.com/googleapis/python-spanner/issues/762)) ([bb7f1db](https://github.com/googleapis/python-spanner/commit/bb7f1db57a0d06800ff7c81336756676fc7ec109))
+* require python 3.7+ ([#768](https://github.com/googleapis/python-spanner/issues/768)) ([f2c273d](https://github.com/googleapis/python-spanner/commit/f2c273d592ddc7d2c5de5ee6284d3b4ecba8a3c1))
+
+## [3.15.1](https://github.com/googleapis/python-spanner/compare/v3.15.0...v3.15.1) (2022-06-17)
+
+
+### Bug Fixes
+
+* don't use a list for empty arguments ([#750](https://github.com/googleapis/python-spanner/issues/750)) ([5d8b055](https://github.com/googleapis/python-spanner/commit/5d8b0558f43a3505f62f9a8eae4228c91c6f0ada))
+
+## [3.15.0](https://github.com/googleapis/python-spanner/compare/v3.14.1...v3.15.0) (2022-06-17)
+
+
+### Features
+
+* Add support for Postgresql dialect ([#741](https://github.com/googleapis/python-spanner/issues/741)) ([d2551b0](https://github.com/googleapis/python-spanner/commit/d2551b028ea2ad4e2eaa1c97ca7bac4683c4fdec))
+
+## [3.14.1](https://github.com/googleapis/python-spanner/compare/v3.14.0...v3.14.1) (2022-06-08)
+
+
+### Bug Fixes
+
+* **deps:** require protobuf <4.0.0dev ([#731](https://github.com/googleapis/python-spanner/issues/731)) ([8004ae5](https://github.com/googleapis/python-spanner/commit/8004ae54b4a6e6a7b19d8da1de46f3526da881ff))
+
+
+### Documentation
+
+* fix changelog header to consistent size ([#732](https://github.com/googleapis/python-spanner/issues/732)) ([97b6d37](https://github.com/googleapis/python-spanner/commit/97b6d37c78a325c404d649a1db5e7337beedefb5))
+
+## [3.14.0](https://github.com/googleapis/python-spanner/compare/v3.13.0...v3.14.0) (2022-04-20)
+
+
+### Features
+
+* add support for Cross region backup proto changes ([#691](https://github.com/googleapis/python-spanner/issues/691)) ([8ac62cb](https://github.com/googleapis/python-spanner/commit/8ac62cb83ee5525d6233dcc34919dcbf9471461b))
+* add support for spanner copy backup feature ([#600](https://github.com/googleapis/python-spanner/issues/600)) ([97faf6c](https://github.com/googleapis/python-spanner/commit/97faf6c11f985f128446bc7d9e99a22362bd1bc1))
+* AuditConfig for IAM v1 ([7642eba](https://github.com/googleapis/python-spanner/commit/7642eba1d9c66525ea1ca6f36dd91c759ed3cbde))
+
+
+### Bug Fixes
+
+* add NOT_FOUND error check in __exit__ method of SessionCheckout. ([#718](https://github.com/googleapis/python-spanner/issues/718)) ([265e207](https://github.com/googleapis/python-spanner/commit/265e20711510aafc956552e9684ab7a39074bf70))
+* **deps:** require google-api-core>=1.31.5, >=2.3.2 ([#685](https://github.com/googleapis/python-spanner/issues/685)) ([7a46a27](https://github.com/googleapis/python-spanner/commit/7a46a27bacbdcb1e72888bd93dfce93c439ceae2))
+* **deps:** require grpc-google-iam-v1 >=0.12.4 ([7642eba](https://github.com/googleapis/python-spanner/commit/7642eba1d9c66525ea1ca6f36dd91c759ed3cbde))
+* **deps:** require proto-plus>=1.15.0 ([7a46a27](https://github.com/googleapis/python-spanner/commit/7a46a27bacbdcb1e72888bd93dfce93c439ceae2))
+
+
+### Documentation
+
+* add generated snippets ([#680](https://github.com/googleapis/python-spanner/issues/680)) ([f21dac4](https://github.com/googleapis/python-spanner/commit/f21dac4c47cb6a6a85fd282b8e5de966b467b1b6))
+
+## [3.13.0](https://github.com/googleapis/python-spanner/compare/v3.12.1...v3.13.0) (2022-02-04)
+
+
+### Features
+
+* add api key support ([819be92](https://github.com/googleapis/python-spanner/commit/819be92e46f63133724dd0d3f5e57b20e33e299e))
+* add database dialect ([#671](https://github.com/googleapis/python-spanner/issues/671)) ([819be92](https://github.com/googleapis/python-spanner/commit/819be92e46f63133724dd0d3f5e57b20e33e299e))
+
+
+### Bug Fixes
+
+* add support for row_count in cursor. ([#675](https://github.com/googleapis/python-spanner/issues/675)) ([d431339](https://github.com/googleapis/python-spanner/commit/d431339069874abf345347b777b3811464925e46))
+* resolve DuplicateCredentialArgs error when using credentials_file ([#676](https://github.com/googleapis/python-spanner/issues/676)) ([39ff137](https://github.com/googleapis/python-spanner/commit/39ff13796adc13b6702d003e4d549775f8cef202))
+
+## [3.12.1](https://www.github.com/googleapis/python-spanner/compare/v3.12.0...v3.12.1) (2022-01-06)
+
+
+### Bug Fixes
+
+* Django and SQLAlchemy APIs are failing to use rowcount ([#654](https://www.github.com/googleapis/python-spanner/issues/654)) ([698260e](https://www.github.com/googleapis/python-spanner/commit/698260e4597badd38e5ad77dda43506a016826d8))
+
+## [3.12.0](https://www.github.com/googleapis/python-spanner/compare/v3.11.1...v3.12.0) (2021-11-25)
+
+
+### Features
+
+* add context manager support in client ([5ae4be8](https://www.github.com/googleapis/python-spanner/commit/5ae4be8ce0a429b33b31a119d7079ce4deb50ca2))
+* add context manager support in client ([#637](https://www.github.com/googleapis/python-spanner/issues/637)) ([5ae4be8](https://www.github.com/googleapis/python-spanner/commit/5ae4be8ce0a429b33b31a119d7079ce4deb50ca2))
+* add support for python 3.10 ([#626](https://www.github.com/googleapis/python-spanner/issues/626)) ([17ca61b](https://www.github.com/googleapis/python-spanner/commit/17ca61b3a8d3f70c400fb57be5edc9073079b9e4)), closes [#623](https://www.github.com/googleapis/python-spanner/issues/623)
+* **db_api:** add an ability to set ReadOnly/ReadWrite connection mode ([#475](https://www.github.com/googleapis/python-spanner/issues/475)) ([cd3b950](https://www.github.com/googleapis/python-spanner/commit/cd3b950e042cd55d5f4a7234dd79c60d49faa15b))
+* **db_api:** make rowcount property NotImplemented ([#603](https://www.github.com/googleapis/python-spanner/issues/603)) ([b5a567f](https://www.github.com/googleapis/python-spanner/commit/b5a567f1db8762802182a3319c16b6456bb208d8))
+* **db_api:** raise exception with message for executemany() ([#595](https://www.github.com/googleapis/python-spanner/issues/595)) ([95908f6](https://www.github.com/googleapis/python-spanner/commit/95908f67e81554858060f0831d10ff05d149fbba))
+* **db_api:** support JSON data type ([#627](https://www.github.com/googleapis/python-spanner/issues/627)) ([d760c2c](https://www.github.com/googleapis/python-spanner/commit/d760c2c240cc80fadaaba9d3a4a3847e10c3c093))
+* **db_api:** support stale reads ([#584](https://www.github.com/googleapis/python-spanner/issues/584)) ([8ca868c](https://www.github.com/googleapis/python-spanner/commit/8ca868c3b3f487c1ef4f655aedd0ac2ca449c103))
+
+
+### Bug Fixes
+
+* **db_api:** emit warning instead of an exception for `rowcount` property ([#628](https://www.github.com/googleapis/python-spanner/issues/628)) ([62ff9ae](https://www.github.com/googleapis/python-spanner/commit/62ff9ae80a9972b0062aca0e9bb3affafb8ec490))
+* **deps:** drop packaging dependency ([5ae4be8](https://www.github.com/googleapis/python-spanner/commit/5ae4be8ce0a429b33b31a119d7079ce4deb50ca2))
+* **deps:** require google-api-core >= 1.28.0 ([5ae4be8](https://www.github.com/googleapis/python-spanner/commit/5ae4be8ce0a429b33b31a119d7079ce4deb50ca2))
+* improper types in pagers generation ([5ae4be8](https://www.github.com/googleapis/python-spanner/commit/5ae4be8ce0a429b33b31a119d7079ce4deb50ca2))
+
+
+### Performance Improvements
+
+* **dbapi:** set headers correctly for dynamic routing ([#644](https://www.github.com/googleapis/python-spanner/issues/644)) ([d769ff8](https://www.github.com/googleapis/python-spanner/commit/d769ff803c41394c9c175e3de772039d816b9cb5))
+
+
+### Documentation
+
+* list oneofs in docstring ([5ae4be8](https://www.github.com/googleapis/python-spanner/commit/5ae4be8ce0a429b33b31a119d7079ce4deb50ca2))
+
+## [3.11.1](https://www.github.com/googleapis/python-spanner/compare/v3.11.0...v3.11.1) (2021-10-04)
+
+
+### Bug Fixes
+
+* add support for json data type ([#593](https://www.github.com/googleapis/python-spanner/issues/593)) ([bc5ddc3](https://www.github.com/googleapis/python-spanner/commit/bc5ddc3fb1eb7eff9a266fe3d1c3c8a4a6fd3763))
+* remove database_version_time param from test_instance_list_backups ([#609](https://www.github.com/googleapis/python-spanner/issues/609)) ([db63aee](https://www.github.com/googleapis/python-spanner/commit/db63aee2b15fd812d78d980bc302d9a217ca711e))
+
+## [3.11.0](https://www.github.com/googleapis/python-spanner/compare/v3.10.0...v3.11.0) (2021-09-29)
+
+
+### Features
+
+* adding support for spanner request options tags ([#276](https://www.github.com/googleapis/python-spanner/issues/276)) ([e16f376](https://www.github.com/googleapis/python-spanner/commit/e16f37649b0023da48ec55a2e65261ee930b9ec4))
+
+## [3.10.0](https://www.github.com/googleapis/python-spanner/compare/v3.9.0...v3.10.0) (2021-09-17)
+
+
+### Features
+
+* set a separate user agent for the DB API ([#566](https://www.github.com/googleapis/python-spanner/issues/566)) ([b5f977e](https://www.github.com/googleapis/python-spanner/commit/b5f977ebf61527914af3c8356aeeae9418114215))
+
+
+### Bug Fixes
+
+* **db_api:** move connection validation into a separate method ([#543](https://www.github.com/googleapis/python-spanner/issues/543)) ([237ae41](https://www.github.com/googleapis/python-spanner/commit/237ae41d0c0db61f157755cf04f84ef2d146972c))
+* handle google.api_core.exceptions.OutOfRange exception and throw IntegrityError as expected by dbapi standards ([#571](https://www.github.com/googleapis/python-spanner/issues/571)) ([dffcf13](https://www.github.com/googleapis/python-spanner/commit/dffcf13d10a0cfb6b61231ae907367563f8eed87))
+
+## [3.9.0](https://www.github.com/googleapis/python-spanner/compare/v3.8.0...v3.9.0) (2021-08-26)
+
+
+### Features
+
+* add support for JSON type ([#353](https://www.github.com/googleapis/python-spanner/issues/353)) ([b1dd04d](https://www.github.com/googleapis/python-spanner/commit/b1dd04d89df6339a9624378c31f9ab26a6114a54))
+
+## [3.8.0](https://www.github.com/googleapis/python-spanner/compare/v3.7.0...v3.8.0) (2021-08-15)
+
+
+### Features
+
+* use DML batches in `executemany()` method ([#412](https://www.github.com/googleapis/python-spanner/issues/412)) ([cbb4ee3](https://www.github.com/googleapis/python-spanner/commit/cbb4ee3eca9ac878b4f3cd78cfcfe8fc1acb86f9))
+
+
+### Bug Fixes
+
+* **samples:** batch_update() results processing error ([#484](https://www.github.com/googleapis/python-spanner/issues/484)) ([bdd5f8b](https://www.github.com/googleapis/python-spanner/commit/bdd5f8b201d1b442837d4fca1d631fe171e276b9))
+
## [3.7.0](https://www.github.com/googleapis/python-spanner/compare/v3.6.0...v3.7.0) (2021-07-29)
@@ -208,7 +1073,7 @@
* DB-API driver + unit tests ([#160](https://www.github.com/googleapis/python-spanner/issues/160)) ([2493fa1](https://www.github.com/googleapis/python-spanner/commit/2493fa1725d2d613f6c064637a4e215ee66255e3))
* migrate to v2.0.0 ([#147](https://www.github.com/googleapis/python-spanner/issues/147)) ([bf4b278](https://www.github.com/googleapis/python-spanner/commit/bf4b27827494e3dc33b1e4333dfe147a36a486b3))
-### [1.19.1](https://www.github.com/googleapis/python-spanner/compare/v1.19.0...v1.19.1) (2020-10-13)
+## [1.19.1](https://www.github.com/googleapis/python-spanner/compare/v1.19.0...v1.19.1) (2020-10-13)
### Bug Fixes
@@ -255,7 +1120,7 @@
* add samples from spanner/cloud-client ([#117](https://www.github.com/googleapis/python-spanner/issues/117)) ([8910771](https://www.github.com/googleapis/python-spanner/commit/891077105d5093a73caf96683d10afef2cd17823)), closes [#804](https://www.github.com/googleapis/python-spanner/issues/804) [#815](https://www.github.com/googleapis/python-spanner/issues/815) [#818](https://www.github.com/googleapis/python-spanner/issues/818) [#887](https://www.github.com/googleapis/python-spanner/issues/887) [#914](https://www.github.com/googleapis/python-spanner/issues/914) [#922](https://www.github.com/googleapis/python-spanner/issues/922) [#928](https://www.github.com/googleapis/python-spanner/issues/928) [#962](https://www.github.com/googleapis/python-spanner/issues/962) [#992](https://www.github.com/googleapis/python-spanner/issues/992) [#1004](https://www.github.com/googleapis/python-spanner/issues/1004) [#1035](https://www.github.com/googleapis/python-spanner/issues/1035) [#1055](https://www.github.com/googleapis/python-spanner/issues/1055) [#1063](https://www.github.com/googleapis/python-spanner/issues/1063) [#1093](https://www.github.com/googleapis/python-spanner/issues/1093) [#1107](https://www.github.com/googleapis/python-spanner/issues/1107) [#1121](https://www.github.com/googleapis/python-spanner/issues/1121) [#1158](https://www.github.com/googleapis/python-spanner/issues/1158) [#1138](https://www.github.com/googleapis/python-spanner/issues/1138) [#1186](https://www.github.com/googleapis/python-spanner/issues/1186) [#1192](https://www.github.com/googleapis/python-spanner/issues/1192) [#1207](https://www.github.com/googleapis/python-spanner/issues/1207) [#1254](https://www.github.com/googleapis/python-spanner/issues/1254) [#1316](https://www.github.com/googleapis/python-spanner/issues/1316) [#1354](https://www.github.com/googleapis/python-spanner/issues/1354) [#1376](https://www.github.com/googleapis/python-spanner/issues/1376) 
[#1377](https://www.github.com/googleapis/python-spanner/issues/1377) [#1402](https://www.github.com/googleapis/python-spanner/issues/1402) [#1406](https://www.github.com/googleapis/python-spanner/issues/1406) [#1425](https://www.github.com/googleapis/python-spanner/issues/1425) [#1441](https://www.github.com/googleapis/python-spanner/issues/1441) [#1464](https://www.github.com/googleapis/python-spanner/issues/1464) [#1519](https://www.github.com/googleapis/python-spanner/issues/1519) [#1548](https://www.github.com/googleapis/python-spanner/issues/1548) [#1633](https://www.github.com/googleapis/python-spanner/issues/1633) [#1742](https://www.github.com/googleapis/python-spanner/issues/1742) [#1836](https://www.github.com/googleapis/python-spanner/issues/1836) [#1846](https://www.github.com/googleapis/python-spanner/issues/1846) [#1872](https://www.github.com/googleapis/python-spanner/issues/1872) [#1980](https://www.github.com/googleapis/python-spanner/issues/1980) [#2068](https://www.github.com/googleapis/python-spanner/issues/2068) [#2153](https://www.github.com/googleapis/python-spanner/issues/2153) [#2224](https://www.github.com/googleapis/python-spanner/issues/2224) [#2198](https://www.github.com/googleapis/python-spanner/issues/2198) [#2251](https://www.github.com/googleapis/python-spanner/issues/2251) [#2295](https://www.github.com/googleapis/python-spanner/issues/2295) [#2356](https://www.github.com/googleapis/python-spanner/issues/2356) [#2392](https://www.github.com/googleapis/python-spanner/issues/2392) [#2439](https://www.github.com/googleapis/python-spanner/issues/2439) [#2535](https://www.github.com/googleapis/python-spanner/issues/2535) [#2005](https://www.github.com/googleapis/python-spanner/issues/2005) [#2721](https://www.github.com/googleapis/python-spanner/issues/2721) [#3093](https://www.github.com/googleapis/python-spanner/issues/3093) [#3101](https://www.github.com/googleapis/python-spanner/issues/3101) 
[#2806](https://www.github.com/googleapis/python-spanner/issues/2806) [#3377](https://www.github.com/googleapis/python-spanner/issues/3377)
* typo fix ([#109](https://www.github.com/googleapis/python-spanner/issues/109)) ([63b4324](https://www.github.com/googleapis/python-spanner/commit/63b432472613bd80e234ee9c9f73906db2f0a52b))
-### [1.17.1](https://www.github.com/googleapis/python-spanner/compare/v1.17.0...v1.17.1) (2020-06-24)
+## [1.17.1](https://www.github.com/googleapis/python-spanner/compare/v1.17.0...v1.17.1) (2020-06-24)
### Documentation
@@ -290,7 +1155,7 @@
* add keepalive changes to synth.py ([#55](https://www.github.com/googleapis/python-spanner/issues/55)) ([805bbb7](https://www.github.com/googleapis/python-spanner/commit/805bbb766fd9c019f528e2f8ed1379d997622d03))
* pass gRPC config options to gRPC channel creation ([#26](https://www.github.com/googleapis/python-spanner/issues/26)) ([6c9a1ba](https://www.github.com/googleapis/python-spanner/commit/6c9a1badfed610a18454137e1b45156872914e7e))
-### [1.15.1](https://www.github.com/googleapis/python-spanner/compare/v1.15.0...v1.15.1) (2020-04-08)
+## [1.15.1](https://www.github.com/googleapis/python-spanner/compare/v1.15.0...v1.15.1) (2020-04-08)
### Bug Fixes
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index d19bc28fc9..76e9061cd2 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -22,7 +22,7 @@ In order to add a feature:
documentation.
- The feature must work fully on the following CPython versions:
- 3.6, 3.7, 3.8 and 3.9 on both UNIX and Windows.
+ 3.9, 3.10, 3.11, 3.12 and 3.13 on both UNIX and Windows.
- The feature must not add unnecessary dependencies (where
"unnecessary" is of course subjective, but new dependencies should
@@ -50,9 +50,9 @@ You'll have to create a development environment using a Git checkout:
# Configure remotes such that you can pull changes from the googleapis/python-spanner
# repository into your local repository.
$ git remote add upstream git@github.com:googleapis/python-spanner.git
- # fetch and merge changes from upstream into master
+ # fetch and merge changes from upstream into main
$ git fetch upstream
- $ git merge upstream/master
+ $ git merge upstream/main
Now your local repo is set up such that you will push changes to your GitHub
repo, from which you can submit a pull request.
@@ -72,7 +72,7 @@ We use `nox `__ to instrument our tests.
- To run a single unit test::
- $ nox -s unit-3.9 -- -k
+ $ nox -s unit-3.13 -- -k
.. note::
@@ -110,12 +110,12 @@ Coding Style
variables::
export GOOGLE_CLOUD_TESTING_REMOTE="upstream"
- export GOOGLE_CLOUD_TESTING_BRANCH="master"
+ export GOOGLE_CLOUD_TESTING_BRANCH="main"
By doing this, you are specifying the location of the most up-to-date
- version of ``python-spanner``. The the suggested remote name ``upstream``
- should point to the official ``googleapis`` checkout and the
- the branch should be the main branch on that remote (``master``).
+ version of ``python-spanner``. The
+ remote name ``upstream`` should point to the official ``googleapis``
+ checkout and the branch should be the default branch on that remote (``main``).
- This repository contains configuration for the
`pre-commit `__ tool, which automates checking
@@ -143,12 +143,12 @@ Running System Tests
$ nox -s system
# Run a single system test
- $ nox -s system-3.8 -- -k
+ $ nox -s system-3.12 -- -k
.. note::
- System tests are only configured to run under Python 3.8.
+ System tests are only configured to run under Python 3.12.
For expediency, we do not run them in older versions of Python 3.
This alone will not run the tests. You'll need to change some local
@@ -195,11 +195,11 @@ configure them just like the System Tests.
# Run all tests in a folder
$ cd samples/samples
- $ nox -s py-3.8
+ $ nox -s py-3.9
# Run a single sample test
$ cd samples/samples
- $ nox -s py-3.8 -- -k
+ $ nox -s py-3.9 -- -k
********************************************
Note About ``README`` as it pertains to PyPI
@@ -209,7 +209,7 @@ The `description on PyPI`_ for the project comes directly from the
``README``. Due to the reStructuredText (``rst``) parser used by
PyPI, relative links which will work on GitHub (e.g. ``CONTRIBUTING.rst``
instead of
-``https://github.com/googleapis/python-spanner/blob/master/CONTRIBUTING.rst``)
+``https://github.com/googleapis/python-spanner/blob/main/CONTRIBUTING.rst``)
may cause problems creating links or rendering the description.
.. _description on PyPI: https://pypi.org/project/google-cloud-spanner
@@ -221,23 +221,25 @@ Supported Python Versions
We support:
-- `Python 3.6`_
-- `Python 3.7`_
-- `Python 3.8`_
- `Python 3.9`_
+- `Python 3.10`_
+- `Python 3.11`_
+- `Python 3.12`_
+- `Python 3.13`_
-.. _Python 3.6: https://docs.python.org/3.6/
-.. _Python 3.7: https://docs.python.org/3.7/
-.. _Python 3.8: https://docs.python.org/3.8/
.. _Python 3.9: https://docs.python.org/3.9/
+.. _Python 3.10: https://docs.python.org/3.10/
+.. _Python 3.11: https://docs.python.org/3.11/
+.. _Python 3.12: https://docs.python.org/3.12/
+.. _Python 3.13: https://docs.python.org/3.13/
Supported versions can be found in our ``noxfile.py`` `config`_.
-.. _config: https://github.com/googleapis/python-spanner/blob/master/noxfile.py
+.. _config: https://github.com/googleapis/python-spanner/blob/main/noxfile.py
-We also explicitly decided to support Python 3 beginning with version 3.6.
+We also explicitly decided to support Python 3 beginning with version 3.9.
Reasons for this include:
- Encouraging use of newest versions of Python 3
diff --git a/MANIFEST.in b/MANIFEST.in
index e783f4c620..d6814cd600 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-# Copyright 2020 Google LLC
+# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/README.rst b/README.rst
index d18dbcfbc6..2b1f7b0acd 100644
--- a/README.rst
+++ b/README.rst
@@ -16,13 +16,13 @@ workloads.
- `Product Documentation`_
.. |GA| image:: https://img.shields.io/badge/support-GA-gold.svg
- :target: https://github.com/googleapis/google-cloud-python/blob/master/README.rst#general-availability
+ :target: https://github.com/googleapis/google-cloud-python/blob/main/README.rst#general-availability
.. |pypi| image:: https://img.shields.io/pypi/v/google-cloud-spanner.svg
:target: https://pypi.org/project/google-cloud-spanner/
.. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-spanner.svg
:target: https://pypi.org/project/google-cloud-spanner/
.. _Cloud Spanner: https://cloud.google.com/spanner/
-.. _Client Library Documentation: https://googleapis.dev/python/spanner/latest
+.. _Client Library Documentation: https://cloud.google.com/python/docs/reference/spanner/latest
.. _Product Documentation: https://cloud.google.com/spanner/docs
Quick Start
@@ -56,12 +56,15 @@ dependencies.
Supported Python Versions
^^^^^^^^^^^^^^^^^^^^^^^^^
-Python >= 3.5
+Python >= 3.9
Deprecated Python Versions
^^^^^^^^^^^^^^^^^^^^^^^^^^
-Python == 2.7. Python 2.7 support will be removed on January 1, 2020.
-
+Python == 2.7.
+Python == 3.5.
+Python == 3.6.
+Python == 3.7.
+Python == 3.8.
Mac/Linux
^^^^^^^^^
@@ -233,6 +236,38 @@ if any of the records does not already exist.
)
+Connection API
+--------------
+Connection API is a wrapper around the Python Spanner API, written in accordance with PEP-249, and provides a simple way of communicating with a Spanner database through connection objects:
+
+.. code:: python
+
+ from google.cloud.spanner_dbapi.connection import connect
+
+ connection = connect("instance-id", "database-id")
+ connection.autocommit = True
+
+ cursor = connection.cursor()
+ cursor.execute("SELECT * FROM table_name")
+
+ result = cursor.fetchall()
+
+
+If using `fine-grained access controls <https://cloud.google.com/spanner/docs/access-with-fgac>`_ you can pass a ``database_role`` argument to ``connect`` to act as that role:
+
+.. code:: python
+
+ connection = connect("instance-id", "database-id", database_role='your-role')
+
+
+Aborted Transactions Retry Mechanism
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In ``!autocommit`` mode, transactions can be aborted due to transient errors. In most cases a retry of the aborted transaction solves the problem. To simplify this, the connection tracks the SQL statements executed in the current transaction. If the transaction is aborted, the connection initiates a new one and re-executes all the statements. In the process, the connection checks that the retried statements return the same results that the original statements did. If the results differ, the transaction is dropped, as the underlying data has changed, and auto retry is impossible.
+
+Auto-retry of aborted transactions is enabled only for ``!autocommit`` mode, as in ``autocommit`` mode transactions are never aborted.
+
+
Next Steps
~~~~~~~~~~
diff --git a/testing/constraints-3.7.txt b/benchmark/__init__.py
similarity index 100%
rename from testing/constraints-3.7.txt
rename to benchmark/__init__.py
diff --git a/benchmark/benchwrapper/README.md b/benchmark/benchwrapper/README.md
new file mode 100644
index 0000000000..613e289b05
--- /dev/null
+++ b/benchmark/benchwrapper/README.md
@@ -0,0 +1,10 @@
+# Benchwrapper
+
+A small gRPC wrapper around the Spanner client library. This allows the
+benchmarking code to prod at Spanner without speaking Python.
+
+## Running
+Run the following commands from python-spanner/ directory.
+```
+export SPANNER_EMULATOR_HOST=localhost:9010
+python3 -m benchmark.benchwrapper.main --port 8081
diff --git a/benchmark/benchwrapper/__init__.py b/benchmark/benchwrapper/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/benchmark/benchwrapper/main.py b/benchmark/benchwrapper/main.py
new file mode 100644
index 0000000000..83ad72b97a
--- /dev/null
+++ b/benchmark/benchwrapper/main.py
@@ -0,0 +1,201 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""The gRPC Benchwrapper around Python Client Library.
+Usage:
+ # Start the emulator using either docker or gcloud CLI.
+
+ # Set up instance and load data into database.
+
+ # Set up environment variables.
+ $ export SPANNER_EMULATOR_HOST=localhost:9010
+
+ # Run the benchmark from python-spanner/ directory.
+ $ python3 -m benchmark.benchwrapper.main --port 8081
+
+"""
+
+from concurrent import futures
+from optparse import OptionParser
+
+import os
+
+import benchmark.benchwrapper.proto.spanner_pb2 as spanner_messages
+import benchmark.benchwrapper.proto.spanner_pb2_grpc as spanner_service
+
+from google.cloud import spanner
+
+import grpc
+
+################################## CONSTANTS ##################################
+
+SPANNER_PROJECT = "someproject"
+SPANNER_INSTANCE = "someinstance"
+SPANNER_DATABASE = "somedatabase"
+
+###############################################################################
+
+
+class SpannerBenchWrapperService(spanner_service.SpannerBenchWrapperServicer):
+ """Benchwrapper Servicer class to implement Read, Insert and Update
+ methods.
+
+ :type project_id: str
+ :param project_id: Spanner project.
+
+ :type instance_id: str
+ :param instance_id: The ID of instance that owns the database.
+
+ :type database_id: str
+ :param database_id: the ID of the database.
+ """
+
+ def __init__(self,
+ project_id=SPANNER_PROJECT,
+ instance_id=SPANNER_INSTANCE,
+ database_id=SPANNER_DATABASE) -> None:
+
+ spanner_client = spanner.Client(project_id)
+ instance = spanner_client.instance(instance_id)
+ self.database = instance.database(database_id)
+
+ super().__init__()
+
+ def Read(self, request, _):
+ """Read represents operations like Go's ReadOnlyTransaction.Query,
+ Java's ReadOnlyTransaction.executeQuery, Python's snapshot.read, and
+ Node's Transaction.Read.
+
+ It will typically be used to read many items.
+
+ :type request:
+ :class: `benchmark.benchwrapper.proto.spanner_pb2.ReadQuery`
+ :param request: A ReadQuery request object.
+
+ :rtype: :class:`benchmark.benchwrapper.proto.spanner_pb2.EmptyResponse`
+ :returns: An EmptyResponse object.
+ """
+ with self.database.snapshot() as snapshot:
+ # Stream the response to the query.
+ list(snapshot.execute_sql(request.query))
+
+ return spanner_messages.EmptyResponse()
+
+ def Insert(self, request, _):
+ """Insert represents operations like Go's Client.Apply, Java's
+ DatabaseClient.writeAtLeastOnce, Python's transaction.commit, and Node's
+ Transaction.Commit.
+
+ It will typically be used to insert many items.
+
+ :type request:
+ :class: `benchmark.benchwrapper.proto.spanner_pb2.InsertQuery`
+ :param request: An InsertQuery request object.
+
+ :rtype: :class:`benchmark.benchwrapper.proto.spanner_pb2.EmptyResponse`
+ :returns: An EmptyResponse object.
+ """
+ with self.database.batch() as batch:
+ batch.insert(
+ table="Singers",
+ columns=("SingerId", "FirstName", "LastName"),
+ values=[(i.id, i.first_name, i.last_name) for i in request.singers],
+ )
+
+ return spanner_messages.EmptyResponse()
+
+ def Update(self, request, _):
+ """Update represents operations like Go's
+ ReadWriteTransaction.BatchUpdate, Java's TransactionRunner.run,
+ Python's Batch.update, and Node's Transaction.BatchUpdate.
+
+ It will typically be used to update many items.
+
+ :type request:
+ :class: `benchmark.benchwrapper.proto.spanner_pb2.UpdateQuery`
+ :param request: An UpdateQuery request object.
+
+ :rtype: :class:`benchmark.benchwrapper.proto.spanner_pb2.EmptyResponse`
+ :returns: An EmptyResponse object.
+ """
+ self.database.run_in_transaction(self.update_singers, request.queries)
+
+ return spanner_messages.EmptyResponse()
+
+ def update_singers(self, transaction, stmts):
+ """Method to execute batch_update in a transaction.
+
+ :type transaction:
+ :class: `google.cloud.spanner_v1.transaction.Transaction`
+ :param transaction: A Spanner Transaction object.
+ :type stmts:
+ :class: `google.protobuf.pyext._message.RepeatedScalarContainer`
+ :param stmts: Statements which are update queries.
+ """
+ transaction.batch_update(stmts)
+
+
+def get_opts():
+ """Parse command line arguments."""
+ parser = OptionParser()
+ parser.add_option("-p", "--port", help="Specify a port to run on")
+
+ opts, _ = parser.parse_args()
+
+ return opts
+
+
+def validate_opts(opts):
+ """Validate command line arguments."""
+ if opts.port is None:
+ raise ValueError("Please specify a valid port, e.g., -p 5000 or "
+ "--port 5000.")
+
+
+def start_grpc_server(num_workers, port):
+ """Method to start the GRPC server."""
+ # Instantiate the GRPC server.
+ server = grpc.server(futures.ThreadPoolExecutor(max_workers=num_workers))
+
+ # Instantiate benchwrapper service.
+ spanner_benchwrapper_service = SpannerBenchWrapperService()
+
+ # Add benchwrapper servicer to server.
+ spanner_service.add_SpannerBenchWrapperServicer_to_server(
+ spanner_benchwrapper_service, server)
+
+ # Form the server address.
+ addr = "localhost:{0}".format(port)
+
+ # Add the port, and start the server.
+ server.add_insecure_port(addr)
+ server.start()
+ server.wait_for_termination()
+
+
+def serve():
+ """Driver method."""
+ if "SPANNER_EMULATOR_HOST" not in os.environ:
+ raise ValueError("This benchmarking server only works when connected "
+ "to an emulator. Please set SPANNER_EMULATOR_HOST.")
+
+ opts = get_opts()
+
+ validate_opts(opts)
+
+ start_grpc_server(10, opts.port)
+
+
+if __name__ == "__main__":
+ serve()
diff --git a/benchmark/benchwrapper/proto/README.md b/benchmark/benchwrapper/proto/README.md
new file mode 100644
index 0000000000..9c9bae4637
--- /dev/null
+++ b/benchmark/benchwrapper/proto/README.md
@@ -0,0 +1,4 @@
+# Regenerating protos
+Run the following command from python-spanner/ directory.
+```
+python3 -m grpc_tools.protoc -I . --python_out=. --grpc_python_out=. benchmark/benchwrapper/proto/*.proto
diff --git a/benchmark/benchwrapper/proto/__init__.py b/benchmark/benchwrapper/proto/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/benchmark/benchwrapper/proto/spanner.proto b/benchmark/benchwrapper/proto/spanner.proto
new file mode 100644
index 0000000000..6ffe363328
--- /dev/null
+++ b/benchmark/benchwrapper/proto/spanner.proto
@@ -0,0 +1,73 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package spanner_bench;
+
+option py_generic_services = true;
+
+message Singer {
+ int64 id = 1;
+ string first_name = 2;
+ string last_name = 3;
+ string singer_info = 4;
+}
+
+message Album {
+ int64 id = 1;
+ int64 singer_id = 2;
+ string album_title = 3;
+}
+
+message ReadQuery {
+ // The query to use in the read call.
+ string query = 1;
+}
+
+message InsertQuery {
+ // The query to use in the insert call.
+ repeated Singer singers = 1;
+ repeated Album albums = 2;
+}
+
+message UpdateQuery {
+ // The queries to use in the update call.
+ repeated string queries = 1;
+}
+
+message EmptyResponse {}
+
+service SpannerBenchWrapper {
+ // Read represents operations like Go's ReadOnlyTransaction.Query, Java's
+ // ReadOnlyTransaction.executeQuery, Python's snapshot.read, and Node's
+ // Transaction.Read.
+ //
+ // It will typically be used to read many items.
+ rpc Read(ReadQuery) returns (EmptyResponse) {}
+
+ // Insert represents operations like Go's Client.Apply, Java's
+ // DatabaseClient.writeAtLeastOnce, Python's transaction.commit, and Node's
+ // Transaction.Commit.
+ //
+ // It will typically be used to insert many items.
+ rpc Insert(InsertQuery) returns (EmptyResponse) {}
+
+ // Update represents operations like Go's ReadWriteTransaction.BatchUpdate,
+ // Java's TransactionRunner.run, Python's Batch.update, and Node's
+ // Transaction.BatchUpdate.
+ //
+ // It will typically be used to update many items.
+ rpc Update(UpdateQuery) returns (EmptyResponse) {}
+}
diff --git a/benchmark/benchwrapper/proto/spanner_pb2.py b/benchmark/benchwrapper/proto/spanner_pb2.py
new file mode 100644
index 0000000000..e2d9b1a825
--- /dev/null
+++ b/benchmark/benchwrapper/proto/spanner_pb2.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: benchmark/benchwrapper/proto/spanner.proto
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import service as _service
+from google.protobuf import service_reflection
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n*benchmark/benchwrapper/proto/spanner.proto\x12\rspanner_bench\"P\n\x06Singer\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x12\n\nfirst_name\x18\x02 \x01(\t\x12\x11\n\tlast_name\x18\x03 \x01(\t\x12\x13\n\x0bsinger_info\x18\x04 \x01(\t\";\n\x05\x41lbum\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x11\n\tsinger_id\x18\x02 \x01(\x03\x12\x13\n\x0b\x61lbum_title\x18\x03 \x01(\t\"\x1a\n\tReadQuery\x12\r\n\x05query\x18\x01 \x01(\t\"[\n\x0bInsertQuery\x12&\n\x07singers\x18\x01 \x03(\x0b\x32\x15.spanner_bench.Singer\x12$\n\x06\x61lbums\x18\x02 \x03(\x0b\x32\x14.spanner_bench.Album\"\x1e\n\x0bUpdateQuery\x12\x0f\n\x07queries\x18\x01 \x03(\t\"\x0f\n\rEmptyResponse2\xe3\x01\n\x13SpannerBenchWrapper\x12@\n\x04Read\x12\x18.spanner_bench.ReadQuery\x1a\x1c.spanner_bench.EmptyResponse\"\x00\x12\x44\n\x06Insert\x12\x1a.spanner_bench.InsertQuery\x1a\x1c.spanner_bench.EmptyResponse\"\x00\x12\x44\n\x06Update\x12\x1a.spanner_bench.UpdateQuery\x1a\x1c.spanner_bench.EmptyResponse\"\x00\x42\x03\x90\x01\x01\x62\x06proto3')
+
+
+
+_SINGER = DESCRIPTOR.message_types_by_name['Singer']
+_ALBUM = DESCRIPTOR.message_types_by_name['Album']
+_READQUERY = DESCRIPTOR.message_types_by_name['ReadQuery']
+_INSERTQUERY = DESCRIPTOR.message_types_by_name['InsertQuery']
+_UPDATEQUERY = DESCRIPTOR.message_types_by_name['UpdateQuery']
+_EMPTYRESPONSE = DESCRIPTOR.message_types_by_name['EmptyResponse']
+Singer = _reflection.GeneratedProtocolMessageType('Singer', (_message.Message,), {
+ 'DESCRIPTOR' : _SINGER,
+ '__module__' : 'benchmark.benchwrapper.proto.spanner_pb2'
+ # @@protoc_insertion_point(class_scope:spanner_bench.Singer)
+ })
+_sym_db.RegisterMessage(Singer)
+
+Album = _reflection.GeneratedProtocolMessageType('Album', (_message.Message,), {
+ 'DESCRIPTOR' : _ALBUM,
+ '__module__' : 'benchmark.benchwrapper.proto.spanner_pb2'
+ # @@protoc_insertion_point(class_scope:spanner_bench.Album)
+ })
+_sym_db.RegisterMessage(Album)
+
+ReadQuery = _reflection.GeneratedProtocolMessageType('ReadQuery', (_message.Message,), {
+ 'DESCRIPTOR' : _READQUERY,
+ '__module__' : 'benchmark.benchwrapper.proto.spanner_pb2'
+ # @@protoc_insertion_point(class_scope:spanner_bench.ReadQuery)
+ })
+_sym_db.RegisterMessage(ReadQuery)
+
+InsertQuery = _reflection.GeneratedProtocolMessageType('InsertQuery', (_message.Message,), {
+ 'DESCRIPTOR' : _INSERTQUERY,
+ '__module__' : 'benchmark.benchwrapper.proto.spanner_pb2'
+ # @@protoc_insertion_point(class_scope:spanner_bench.InsertQuery)
+ })
+_sym_db.RegisterMessage(InsertQuery)
+
+UpdateQuery = _reflection.GeneratedProtocolMessageType('UpdateQuery', (_message.Message,), {
+ 'DESCRIPTOR' : _UPDATEQUERY,
+ '__module__' : 'benchmark.benchwrapper.proto.spanner_pb2'
+ # @@protoc_insertion_point(class_scope:spanner_bench.UpdateQuery)
+ })
+_sym_db.RegisterMessage(UpdateQuery)
+
+EmptyResponse = _reflection.GeneratedProtocolMessageType('EmptyResponse', (_message.Message,), {
+ 'DESCRIPTOR' : _EMPTYRESPONSE,
+ '__module__' : 'benchmark.benchwrapper.proto.spanner_pb2'
+ # @@protoc_insertion_point(class_scope:spanner_bench.EmptyResponse)
+ })
+_sym_db.RegisterMessage(EmptyResponse)
+
+_SPANNERBENCHWRAPPER = DESCRIPTOR.services_by_name['SpannerBenchWrapper']
+if _descriptor._USE_C_DESCRIPTORS == False:
+
+ DESCRIPTOR._options = None
+ DESCRIPTOR._serialized_options = b'\220\001\001'
+ _SINGER._serialized_start=61
+ _SINGER._serialized_end=141
+ _ALBUM._serialized_start=143
+ _ALBUM._serialized_end=202
+ _READQUERY._serialized_start=204
+ _READQUERY._serialized_end=230
+ _INSERTQUERY._serialized_start=232
+ _INSERTQUERY._serialized_end=323
+ _UPDATEQUERY._serialized_start=325
+ _UPDATEQUERY._serialized_end=355
+ _EMPTYRESPONSE._serialized_start=357
+ _EMPTYRESPONSE._serialized_end=372
+ _SPANNERBENCHWRAPPER._serialized_start=375
+ _SPANNERBENCHWRAPPER._serialized_end=602
+SpannerBenchWrapper = service_reflection.GeneratedServiceType('SpannerBenchWrapper', (_service.Service,), dict(
+ DESCRIPTOR = _SPANNERBENCHWRAPPER,
+ __module__ = 'benchmark.benchwrapper.proto.spanner_pb2'
+ ))
+
+SpannerBenchWrapper_Stub = service_reflection.GeneratedServiceStubType('SpannerBenchWrapper_Stub', (SpannerBenchWrapper,), dict(
+ DESCRIPTOR = _SPANNERBENCHWRAPPER,
+ __module__ = 'benchmark.benchwrapper.proto.spanner_pb2'
+ ))
+
+
+# @@protoc_insertion_point(module_scope)
diff --git a/benchmark/benchwrapper/proto/spanner_pb2_grpc.py b/benchmark/benchwrapper/proto/spanner_pb2_grpc.py
new file mode 100644
index 0000000000..bc1792f30b
--- /dev/null
+++ b/benchmark/benchwrapper/proto/spanner_pb2_grpc.py
@@ -0,0 +1,147 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+
+from benchmark.benchwrapper.proto import spanner_pb2 as benchmark_dot_benchwrapper_dot_proto_dot_spanner__pb2
+
+
+class SpannerBenchWrapperStub(object):
+ """Missing associated documentation comment in .proto file."""
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.Read = channel.unary_unary(
+ '/spanner_bench.SpannerBenchWrapper/Read',
+ request_serializer=benchmark_dot_benchwrapper_dot_proto_dot_spanner__pb2.ReadQuery.SerializeToString,
+ response_deserializer=benchmark_dot_benchwrapper_dot_proto_dot_spanner__pb2.EmptyResponse.FromString,
+ )
+ self.Insert = channel.unary_unary(
+ '/spanner_bench.SpannerBenchWrapper/Insert',
+ request_serializer=benchmark_dot_benchwrapper_dot_proto_dot_spanner__pb2.InsertQuery.SerializeToString,
+ response_deserializer=benchmark_dot_benchwrapper_dot_proto_dot_spanner__pb2.EmptyResponse.FromString,
+ )
+ self.Update = channel.unary_unary(
+ '/spanner_bench.SpannerBenchWrapper/Update',
+ request_serializer=benchmark_dot_benchwrapper_dot_proto_dot_spanner__pb2.UpdateQuery.SerializeToString,
+ response_deserializer=benchmark_dot_benchwrapper_dot_proto_dot_spanner__pb2.EmptyResponse.FromString,
+ )
+
+
+class SpannerBenchWrapperServicer(object):
+ """Missing associated documentation comment in .proto file."""
+
+ def Read(self, request, context):
+ """Read represents operations like Go's ReadOnlyTransaction.Query, Java's
+ ReadOnlyTransaction.executeQuery, Python's snapshot.read, and Node's
+ Transaction.Read.
+
+ It will typically be used to read many items.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def Insert(self, request, context):
+ """Insert represents operations like Go's Client.Apply, Java's
+ DatabaseClient.writeAtLeastOnce, Python's transaction.commit, and Node's
+ Transaction.Commit.
+
+ It will typically be used to insert many items.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def Update(self, request, context):
+ """Update represents operations like Go's ReadWriteTransaction.BatchUpdate,
+ Java's TransactionRunner.run, Python's Batch.update, and Node's
+ Transaction.BatchUpdate.
+
+ It will typically be used to update many items.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_SpannerBenchWrapperServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'Read': grpc.unary_unary_rpc_method_handler(
+ servicer.Read,
+ request_deserializer=benchmark_dot_benchwrapper_dot_proto_dot_spanner__pb2.ReadQuery.FromString,
+ response_serializer=benchmark_dot_benchwrapper_dot_proto_dot_spanner__pb2.EmptyResponse.SerializeToString,
+ ),
+ 'Insert': grpc.unary_unary_rpc_method_handler(
+ servicer.Insert,
+ request_deserializer=benchmark_dot_benchwrapper_dot_proto_dot_spanner__pb2.InsertQuery.FromString,
+ response_serializer=benchmark_dot_benchwrapper_dot_proto_dot_spanner__pb2.EmptyResponse.SerializeToString,
+ ),
+ 'Update': grpc.unary_unary_rpc_method_handler(
+ servicer.Update,
+ request_deserializer=benchmark_dot_benchwrapper_dot_proto_dot_spanner__pb2.UpdateQuery.FromString,
+ response_serializer=benchmark_dot_benchwrapper_dot_proto_dot_spanner__pb2.EmptyResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'spanner_bench.SpannerBenchWrapper', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
+
+
+ # This class is part of an EXPERIMENTAL API.
+class SpannerBenchWrapper(object):
+ """Missing associated documentation comment in .proto file."""
+
+ @staticmethod
+ def Read(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/spanner_bench.SpannerBenchWrapper/Read',
+ benchmark_dot_benchwrapper_dot_proto_dot_spanner__pb2.ReadQuery.SerializeToString,
+ benchmark_dot_benchwrapper_dot_proto_dot_spanner__pb2.EmptyResponse.FromString,
+ options, channel_credentials,
+ insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def Insert(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/spanner_bench.SpannerBenchWrapper/Insert',
+ benchmark_dot_benchwrapper_dot_proto_dot_spanner__pb2.InsertQuery.SerializeToString,
+ benchmark_dot_benchwrapper_dot_proto_dot_spanner__pb2.EmptyResponse.FromString,
+ options, channel_credentials,
+ insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def Update(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/spanner_bench.SpannerBenchWrapper/Update',
+ benchmark_dot_benchwrapper_dot_proto_dot_spanner__pb2.UpdateQuery.SerializeToString,
+ benchmark_dot_benchwrapper_dot_proto_dot_spanner__pb2.EmptyResponse.FromString,
+ options, channel_credentials,
+ insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
diff --git a/docs/api-reference.rst b/docs/api-reference.rst
deleted file mode 100644
index 41046f78bf..0000000000
--- a/docs/api-reference.rst
+++ /dev/null
@@ -1,34 +0,0 @@
-API Reference
-=============
-
-The following classes and methods constitute the Spanner client.
-Most likely, you will be interacting almost exclusively with these:
-
-.. toctree::
- :maxdepth: 1
-
- client-api
- instance-api
- database-api
- table-api
- session-api
- keyset-api
- snapshot-api
- batch-api
- transaction-api
- streamed-api
-
-
-The classes and methods above depend on the following, lower-level
-classes and methods. Documentation for these is provided for completion,
-and some advanced use cases may wish to interact with these directly:
-
-.. toctree::
- :maxdepth: 1
-
- spanner_v1/services
- spanner_v1/types
- spanner_admin_database_v1/services
- spanner_admin_database_v1/types
- spanner_admin_instance_v1/services
- spanner_admin_instance_v1/types
diff --git a/docs/client-usage.rst b/docs/client-usage.rst
index ce13bf4aa0..7ba3390e59 100644
--- a/docs/client-usage.rst
+++ b/docs/client-usage.rst
@@ -1,5 +1,5 @@
-Spanner Client
-==============
+Spanner Client Usage
+====================
.. _spanner-client:
diff --git a/docs/conf.py b/docs/conf.py
index 1d4a1c0b91..78e49ed55c 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2021 Google LLC
+# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -76,8 +76,8 @@
# The encoding of source files.
# source_encoding = 'utf-8-sig'
-# The master toctree document.
-master_doc = "index"
+# The root toctree document.
+root_doc = "index"
# General information about the project.
project = "google-cloud-spanner"
@@ -110,6 +110,7 @@
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
+ "**/.nox/**/*",
"samples/AUTHORING_GUIDE.md",
"samples/CONTRIBUTING.md",
"samples/snippets/README.rst",
@@ -279,7 +280,7 @@
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
- master_doc,
+ root_doc,
"google-cloud-spanner.tex",
"google-cloud-spanner Documentation",
author,
@@ -314,7 +315,7 @@
# (source start file, name, description, authors, manual section).
man_pages = [
(
- master_doc,
+ root_doc,
"google-cloud-spanner",
"google-cloud-spanner Documentation",
[author],
@@ -333,7 +334,7 @@
# dir menu entry, description, category)
texinfo_documents = [
(
- master_doc,
+ root_doc,
"google-cloud-spanner",
"google-cloud-spanner Documentation",
author,
@@ -360,7 +361,10 @@
intersphinx_mapping = {
"python": ("https://python.readthedocs.org/en/latest/", None),
"google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
- "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
+ "google.api_core": (
+ "https://googleapis.dev/python/google-api-core/latest/",
+ None,
+ ),
"grpc": ("https://grpc.github.io/grpc/python/", None),
"proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
diff --git a/docs/database-usage.rst b/docs/database-usage.rst
index 629f1ab28a..afcfa06cb2 100644
--- a/docs/database-usage.rst
+++ b/docs/database-usage.rst
@@ -1,5 +1,5 @@
-Database Admin
-==============
+Database Admin Usage
+====================
After creating an :class:`~google.cloud.spanner_v1.instance.Instance`, you can
interact with individual databases for that instance.
diff --git a/docs/index.rst b/docs/index.rst
index a4ab1b27d7..0de0483409 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -5,27 +5,48 @@
Usage Documentation
-------------------
.. toctree::
- :maxdepth: 1
- :titlesonly:
+ :maxdepth: 2
client-usage
- instance-usage
- database-usage
table-usage
batch-usage
snapshot-usage
transaction-usage
+ database-usage
+ instance-usage
+
API Documentation
-----------------
.. toctree::
:maxdepth: 1
:titlesonly:
- api-reference
advanced-session-pool-topics
opentelemetry-tracing
+ spanner_v1/client
+ spanner_v1/instance
+ spanner_v1/database
+ spanner_v1/table
+ spanner_v1/session
+ spanner_v1/keyset
+ spanner_v1/snapshot
+ spanner_v1/batch
+ spanner_v1/transaction
+ spanner_v1/streamed
+
+ spanner_v1/services_
+ spanner_v1/types_
+ spanner_admin_database_v1/services_
+ spanner_admin_database_v1/types_
+ spanner_admin_database_v1/database_admin
+ spanner_admin_instance_v1/services_
+ spanner_admin_instance_v1/types_
+ spanner_admin_instance_v1/instance_admin
+
+
+
Changelog
---------
@@ -35,3 +56,8 @@ For a list of all ``google-cloud-spanner`` releases:
:maxdepth: 2
changelog
+
+.. toctree::
+ :hidden:
+
+ summary_overview.md
diff --git a/docs/instance-usage.rst b/docs/instance-usage.rst
index 55042c2df3..b45b69acc6 100644
--- a/docs/instance-usage.rst
+++ b/docs/instance-usage.rst
@@ -1,5 +1,5 @@
-Instance Admin
-==============
+Instance Admin Usage
+====================
After creating a :class:`~google.cloud.spanner_v1.client.Client`, you can
interact with individual instances for a project.
diff --git a/docs/opentelemetry-tracing.rst b/docs/opentelemetry-tracing.rst
index 9b3dea276f..c581d2cb87 100644
--- a/docs/opentelemetry-tracing.rst
+++ b/docs/opentelemetry-tracing.rst
@@ -8,10 +8,8 @@ To take advantage of these traces, we first need to install OpenTelemetry:
.. code-block:: sh
- pip install opentelemetry-api opentelemetry-sdk opentelemetry-instrumentation
-
- # [Optional] Installs the cloud monitoring exporter, however you can use any exporter of your choice
- pip install opentelemetry-exporter-google-cloud
+ pip install opentelemetry-api opentelemetry-sdk
+ pip install opentelemetry-exporter-gcp-trace
We also need to tell OpenTelemetry which exporter to use. To export Spanner traces to `Cloud Tracing `_, add the following lines to your application:
@@ -19,22 +17,80 @@ We also need to tell OpenTelemetry which exporter to use. To export Spanner trac
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
- from opentelemetry.trace.sampling import ProbabilitySampler
+ from opentelemetry.sdk.trace.sampling import TraceIdRatioBased
from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
- # BatchExportSpanProcessor exports spans to Cloud Trace
+ # BatchSpanProcessor exports spans to Cloud Trace
+ # in a separate thread to not block on the main thread
- from opentelemetry.sdk.trace.export import BatchExportSpanProcessor
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor
# Create and export one trace every 1000 requests
- sampler = ProbabilitySampler(1/1000)
- # Use the default tracer provider
- trace.set_tracer_provider(TracerProvider(sampler=sampler))
- trace.get_tracer_provider().add_span_processor(
+ sampler = TraceIdRatioBased(1/1000)
+ tracer_provider = TracerProvider(sampler=sampler)
+ tracer_provider.add_span_processor(
# Initialize the cloud tracing exporter
- BatchExportSpanProcessor(CloudTraceSpanExporter())
+ BatchSpanProcessor(CloudTraceSpanExporter())
+ )
+ observability_options = dict(
+ tracer_provider=tracer_provider,
+
+ # By default extended_tracing is set to True due
+ # to legacy reasons to avoid breaking changes, you
+ # can modify it though using the environment variable
+ # SPANNER_ENABLE_EXTENDED_TRACING=false.
+ enable_extended_tracing=False,
+
+ # By default end to end tracing is set to False. Set to True
+ # for getting spans for Spanner server.
+ enable_end_to_end_tracing=True,
)
+ spanner = spanner.NewClient(project_id, observability_options=observability_options)
+
+
+To get more fine-grained traces from gRPC, you can enable the gRPC instrumentation by the following
+
+.. code-block:: sh
+
+ pip install opentelemetry-instrumentation opentelemetry-instrumentation-grpc
+
+and then in your Python code, please add the following lines:
+
+.. code:: python
+
+ from opentelemetry.instrumentation.grpc import GrpcInstrumentorClient
+ grpc_client_instrumentor = GrpcInstrumentorClient()
+ grpc_client_instrumentor.instrument()
+
Generated spanner traces should now be available on `Cloud Trace `_.
Tracing is most effective when many libraries are instrumented to provide insight over the entire lifespan of a request.
For a list of libraries that can be instrumented, see the `OpenTelemetry Integrations` section of the `OpenTelemetry Python docs `_
+
+Annotating spans with SQL
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+By default your spans will be annotated with SQL statements where appropriate, but that can be a PII (Personally Identifiable Information)
+leak. Sadly due to legacy behavior, we cannot simply turn off this behavior by default. However you can control this behavior by setting
+
+ SPANNER_ENABLE_EXTENDED_TRACING=false
+
+to turn it off globally or when creating each SpannerClient, please set `observability_options.enable_extended_tracing=false`
+
+End to end tracing
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In addition to client-side tracing, you can opt in for end-to-end tracing. End-to-end tracing helps you understand and debug latency issues that are specific to Spanner. See the `tracing overview <https://cloud.google.com/spanner/docs/tracing-overview>`_ for more information.
+
+To configure end-to-end tracing:
+
+1. Opt in for end-to-end tracing. You can opt in by either:
+* Setting the environment variable `SPANNER_ENABLE_END_TO_END_TRACING=true` before your application is started
+* In code, by setting `observability_options.enable_end_to_end_tracing=true` when creating each SpannerClient.
+
+2. Set the trace context propagation in OpenTelemetry.
+
+.. code:: python
+
+ from opentelemetry.propagate import set_global_textmap
+ from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
+ set_global_textmap(TraceContextTextMapPropagator())
\ No newline at end of file
diff --git a/docs/snapshot-usage.rst b/docs/snapshot-usage.rst
index 311ea8f3ca..0f00686a54 100644
--- a/docs/snapshot-usage.rst
+++ b/docs/snapshot-usage.rst
@@ -24,8 +24,7 @@ reads as of a given timestamp:
.. code:: python
import datetime
- from pytz import UTC
- TIMESTAMP = datetime.datetime.utcnow().replace(tzinfo=UTC)
+ TIMESTAMP = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
with database.snapshot(read_timestamp=TIMESTAMP) as snapshot:
...
diff --git a/docs/spanner_admin_database_v1/services.rst b/docs/spanner_admin_database_v1/services_.rst
similarity index 100%
rename from docs/spanner_admin_database_v1/services.rst
rename to docs/spanner_admin_database_v1/services_.rst
diff --git a/docs/spanner_admin_database_v1/types.rst b/docs/spanner_admin_database_v1/types_.rst
similarity index 91%
rename from docs/spanner_admin_database_v1/types.rst
rename to docs/spanner_admin_database_v1/types_.rst
index 95e1d7f88b..fe6c27778b 100644
--- a/docs/spanner_admin_database_v1/types.rst
+++ b/docs/spanner_admin_database_v1/types_.rst
@@ -3,5 +3,4 @@ Types for Google Cloud Spanner Admin Database v1 API
.. automodule:: google.cloud.spanner_admin_database_v1.types
:members:
- :undoc-members:
:show-inheritance:
diff --git a/docs/spanner_admin_instance_v1/services.rst b/docs/spanner_admin_instance_v1/services_.rst
similarity index 100%
rename from docs/spanner_admin_instance_v1/services.rst
rename to docs/spanner_admin_instance_v1/services_.rst
diff --git a/docs/spanner_admin_instance_v1/types.rst b/docs/spanner_admin_instance_v1/types_.rst
similarity index 91%
rename from docs/spanner_admin_instance_v1/types.rst
rename to docs/spanner_admin_instance_v1/types_.rst
index 8f7204ebce..250cf6bf9b 100644
--- a/docs/spanner_admin_instance_v1/types.rst
+++ b/docs/spanner_admin_instance_v1/types_.rst
@@ -3,5 +3,4 @@ Types for Google Cloud Spanner Admin Instance v1 API
.. automodule:: google.cloud.spanner_admin_instance_v1.types
:members:
- :undoc-members:
:show-inheritance:
diff --git a/docs/batch-api.rst b/docs/spanner_v1/batch.rst
similarity index 100%
rename from docs/batch-api.rst
rename to docs/spanner_v1/batch.rst
diff --git a/docs/client-api.rst b/docs/spanner_v1/client.rst
similarity index 100%
rename from docs/client-api.rst
rename to docs/spanner_v1/client.rst
diff --git a/docs/database-api.rst b/docs/spanner_v1/database.rst
similarity index 100%
rename from docs/database-api.rst
rename to docs/spanner_v1/database.rst
diff --git a/docs/instance-api.rst b/docs/spanner_v1/instance.rst
similarity index 100%
rename from docs/instance-api.rst
rename to docs/spanner_v1/instance.rst
diff --git a/docs/keyset-api.rst b/docs/spanner_v1/keyset.rst
similarity index 100%
rename from docs/keyset-api.rst
rename to docs/spanner_v1/keyset.rst
diff --git a/docs/spanner_v1/services.rst b/docs/spanner_v1/services_.rst
similarity index 100%
rename from docs/spanner_v1/services.rst
rename to docs/spanner_v1/services_.rst
diff --git a/docs/session-api.rst b/docs/spanner_v1/session.rst
similarity index 100%
rename from docs/session-api.rst
rename to docs/spanner_v1/session.rst
diff --git a/docs/snapshot-api.rst b/docs/spanner_v1/snapshot.rst
similarity index 100%
rename from docs/snapshot-api.rst
rename to docs/spanner_v1/snapshot.rst
diff --git a/docs/streamed-api.rst b/docs/spanner_v1/streamed.rst
similarity index 100%
rename from docs/streamed-api.rst
rename to docs/spanner_v1/streamed.rst
diff --git a/docs/table-api.rst b/docs/spanner_v1/table.rst
similarity index 100%
rename from docs/table-api.rst
rename to docs/spanner_v1/table.rst
diff --git a/docs/transaction-api.rst b/docs/spanner_v1/transaction.rst
similarity index 100%
rename from docs/transaction-api.rst
rename to docs/spanner_v1/transaction.rst
diff --git a/docs/spanner_v1/types.rst b/docs/spanner_v1/types_.rst
similarity index 88%
rename from docs/spanner_v1/types.rst
rename to docs/spanner_v1/types_.rst
index 8678aba188..c7ff7e6c71 100644
--- a/docs/spanner_v1/types.rst
+++ b/docs/spanner_v1/types_.rst
@@ -3,5 +3,4 @@ Types for Google Cloud Spanner v1 API
.. automodule:: google.cloud.spanner_v1.types
:members:
- :undoc-members:
:show-inheritance:
diff --git a/docs/summary_overview.md b/docs/summary_overview.md
new file mode 100644
index 0000000000..ffaf71df07
--- /dev/null
+++ b/docs/summary_overview.md
@@ -0,0 +1,22 @@
+[
+This is a templated file. Adding content to this file may result in it being
+reverted. Instead, if you want to place additional content, create an
+"overview_content.md" file in `docs/` directory. The Sphinx tool will
+pick up on the content and merge the content.
+]: #
+
+# Cloud Spanner API
+
+Overview of the APIs available for Cloud Spanner API.
+
+## All entries
+
+Classes, methods and properties & attributes for
+Cloud Spanner API.
+
+[classes](https://cloud.google.com/python/docs/reference/spanner/latest/summary_class.html)
+
+[methods](https://cloud.google.com/python/docs/reference/spanner/latest/summary_method.html)
+
+[properties and
+attributes](https://cloud.google.com/python/docs/reference/spanner/latest/summary_property.html)
diff --git a/docs/table-usage.rst b/docs/table-usage.rst
index 9d28da1ebb..01459b5f8e 100644
--- a/docs/table-usage.rst
+++ b/docs/table-usage.rst
@@ -1,5 +1,5 @@
-Table Admin
-===========
+Table Admin Usage
+=================
After creating an :class:`~google.cloud.spanner_v1.database.Database`, you can
interact with individual tables for that instance.
diff --git a/docs/transaction-usage.rst b/docs/transaction-usage.rst
index 4781cfa148..78026bf5a4 100644
--- a/docs/transaction-usage.rst
+++ b/docs/transaction-usage.rst
@@ -5,7 +5,8 @@ A :class:`~google.cloud.spanner_v1.transaction.Transaction` represents a
transaction: when the transaction commits, it will send any accumulated
mutations to the server.
-To understand more about how transactions work, visit [Transaction](https://cloud.google.com/spanner/docs/reference/rest/v1/Transaction).
+To understand more about how transactions work, visit
+`Transaction `_.
To learn more about how to use them in the Python client, continue reading.
@@ -90,8 +91,8 @@ any of the records already exists.
Update records using a Transaction
----------------------------------
-:meth:`Transaction.update` updates one or more existing records in a table. Fails
-if any of the records does not already exist.
+:meth:`Transaction.update` updates one or more existing records in a table.
+Fails if any of the records does not already exist.
.. code:: python
@@ -178,9 +179,9 @@ Using :meth:`~Database.run_in_transaction`
Rather than calling :meth:`~Transaction.commit` or :meth:`~Transaction.rollback`
manually, you should use :meth:`~Database.run_in_transaction` to run the
-function that you need. The transaction's :meth:`~Transaction.commit` method
+function that you need. The transaction's :meth:`~Transaction.commit` method
will be called automatically if the ``with`` block exits without raising an
-exception. The function will automatically be retried for
+exception. The function will automatically be retried for
:class:`~google.api_core.exceptions.Aborted` errors, but will raise on
:class:`~google.api_core.exceptions.GoogleAPICallError` and
:meth:`~Transaction.rollback` will be called on all others.
@@ -188,25 +189,30 @@ exception. The function will automatically be retried for
.. code:: python
def _unit_of_work(transaction):
-
transaction.insert(
- 'citizens', columns=['email', 'first_name', 'last_name', 'age'],
+ 'citizens',
+ columns=['email', 'first_name', 'last_name', 'age'],
values=[
['phred@exammple.com', 'Phred', 'Phlyntstone', 32],
['bharney@example.com', 'Bharney', 'Rhubble', 31],
- ])
+ ]
+ )
transaction.update(
- 'citizens', columns=['email', 'age'],
+ 'citizens',
+ columns=['email', 'age'],
values=[
['phred@exammple.com', 33],
['bharney@example.com', 32],
- ])
+ ]
+ )
...
- transaction.delete('citizens',
- keyset['bharney@example.com', 'nonesuch@example.com'])
+ transaction.delete(
+ 'citizens',
+ keyset=['bharney@example.com', 'nonesuch@example.com']
+ )
db.run_in_transaction(_unit_of_work)
@@ -242,7 +248,7 @@ If an exception is raised inside the ``with`` block, the transaction's
...
transaction.delete('citizens',
- keyset['bharney@example.com', 'nonesuch@example.com'])
+ keyset=['bharney@example.com', 'nonesuch@example.com'])
Begin a Transaction
diff --git a/examples/grpc_instrumentation_enabled.py b/examples/grpc_instrumentation_enabled.py
new file mode 100644
index 0000000000..c8bccd0a9d
--- /dev/null
+++ b/examples/grpc_instrumentation_enabled.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+import os
+import time
+
+import google.cloud.spanner as spanner
+from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import BatchSpanProcessor
+from opentelemetry.sdk.trace.sampling import ALWAYS_ON
+from opentelemetry import trace
+
+# Enable the gRPC instrumentation if you'd like more introspection.
+from opentelemetry.instrumentation.grpc import GrpcInstrumentorClient
+
+grpc_client_instrumentor = GrpcInstrumentorClient()
+grpc_client_instrumentor.instrument()
+
+
+def main():
+ # Setup common variables that'll be used between Spanner and traces.
+ project_id = os.environ.get('SPANNER_PROJECT_ID', 'test-project')
+
+ # Setup OpenTelemetry, trace and Cloud Trace exporter.
+ tracer_provider = TracerProvider(sampler=ALWAYS_ON)
+ trace_exporter = CloudTraceSpanExporter(project_id=project_id)
+ tracer_provider.add_span_processor(BatchSpanProcessor(trace_exporter))
+ trace.set_tracer_provider(tracer_provider)
+ # Retrieve a tracer from the global tracer provider.
+ tracer = tracer_provider.get_tracer('MyApp')
+
+ # Setup the Cloud Spanner Client.
+ spanner_client = spanner.Client(project_id)
+
+ instance = spanner_client.instance('test-instance')
+ database = instance.database('test-db')
+
+ # Now run our queries
+ with tracer.start_as_current_span('QueryInformationSchema'):
+ with database.snapshot() as snapshot:
+ with tracer.start_as_current_span('InformationSchema'):
+ info_schema = snapshot.execute_sql(
+ 'SELECT * FROM INFORMATION_SCHEMA.TABLES')
+ for row in info_schema:
+ print(row)
+
+ with tracer.start_as_current_span('ServerTimeQuery'):
+ with database.snapshot() as snapshot:
+ # Purposefully issue a bad SQL statement to examine exceptions
+ # that get recorded and an ERROR span status.
+ try:
+ data = snapshot.execute_sql('SELECT CURRENT_TIMESTAMPx()')
+ for row in data:
+ print(row)
+ except Exception as e:
+ pass
+
+
+if __name__ == '__main__':
+ main()
diff --git a/examples/trace.py b/examples/trace.py
new file mode 100644
index 0000000000..5b826ca5ad
--- /dev/null
+++ b/examples/trace.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+import os
+import time
+
+import google.cloud.spanner as spanner
+from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
+from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import BatchSpanProcessor
+from opentelemetry.sdk.trace.sampling import ALWAYS_ON
+from opentelemetry import trace
+from opentelemetry.propagate import set_global_textmap
+from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
+
+# Setup common variables that'll be used between Spanner and traces.
+project_id = os.environ.get('SPANNER_PROJECT_ID', 'test-project')
+
+def spanner_with_cloud_trace():
+ # [START spanner_opentelemetry_traces_cloudtrace_usage]
+ # Setup OpenTelemetry, trace and Cloud Trace exporter.
+ tracer_provider = TracerProvider(sampler=ALWAYS_ON)
+ trace_exporter = CloudTraceSpanExporter(project_id=project_id)
+ tracer_provider.add_span_processor(BatchSpanProcessor(trace_exporter))
+
+ # Setup the Cloud Spanner Client.
+ spanner_client = spanner.Client(
+ project_id,
+ observability_options=dict(tracer_provider=tracer_provider, enable_extended_tracing=True, enable_end_to_end_tracing=True),
+ )
+
+ # [END spanner_opentelemetry_traces_cloudtrace_usage]
+ return spanner_client
+
+def spanner_with_otlp():
+ # [START spanner_opentelemetry_traces_otlp_usage]
+ # Setup OpenTelemetry, trace and OTLP exporter.
+ tracer_provider = TracerProvider(sampler=ALWAYS_ON)
+ otlp_exporter = OTLPSpanExporter(endpoint="http://localhost:4317")
+ tracer_provider.add_span_processor(BatchSpanProcessor(otlp_exporter))
+
+ # Setup the Cloud Spanner Client.
+ spanner_client = spanner.Client(
+ project_id,
+ observability_options=dict(tracer_provider=tracer_provider, enable_extended_tracing=True, enable_end_to_end_tracing=True),
+ )
+ # [END spanner_opentelemetry_traces_otlp_usage]
+ return spanner_client
+
+
+def main():
+ # Setup OpenTelemetry, trace and Cloud Trace exporter.
+ tracer_provider = TracerProvider(sampler=ALWAYS_ON)
+ trace_exporter = CloudTraceSpanExporter(project_id=project_id)
+ tracer_provider.add_span_processor(BatchSpanProcessor(trace_exporter))
+
+ # Setup the Cloud Spanner Client.
+ # Change to "spanner_client = spanner_with_otlp" to use OTLP exporter
+ spanner_client = spanner_with_cloud_trace()
+ instance = spanner_client.instance('test-instance')
+ database = instance.database('test-db')
+
+ # Set W3C Trace Context as the global propagator for end to end tracing.
+ set_global_textmap(TraceContextTextMapPropagator())
+
+ # Retrieve a tracer from our custom tracer provider.
+ tracer = tracer_provider.get_tracer('MyApp')
+
+ # Now run our queries
+ with tracer.start_as_current_span('QueryInformationSchema'):
+ with database.snapshot() as snapshot:
+ with tracer.start_as_current_span('InformationSchema'):
+ info_schema = snapshot.execute_sql(
+ 'SELECT * FROM INFORMATION_SCHEMA.TABLES')
+ for row in info_schema:
+ print(row)
+
+ with tracer.start_as_current_span('ServerTimeQuery'):
+ with database.snapshot() as snapshot:
+ # Purposefully issue a bad SQL statement to examine exceptions
+ # that get recorded and an ERROR span status.
+ try:
+ data = snapshot.execute_sql('SELECT CURRENT_TIMESTAMPx()')
+ for row in data:
+ print(row)
+ except Exception as e:
+ print(e)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/google/__init__.py b/google/__init__.py
deleted file mode 100644
index 2f4b4738ae..0000000000
--- a/google/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-try:
- import pkg_resources
-
- pkg_resources.declare_namespace(__name__)
-except ImportError:
- import pkgutil
-
- __path__ = pkgutil.extend_path(__path__, __name__)
diff --git a/google/cloud/__init__.py b/google/cloud/__init__.py
deleted file mode 100644
index 2f4b4738ae..0000000000
--- a/google/cloud/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-try:
- import pkg_resources
-
- pkg_resources.declare_namespace(__name__)
-except ImportError:
- import pkgutil
-
- __path__ = pkgutil.extend_path(__path__, __name__)
diff --git a/google/cloud/spanner_admin_database_v1/__init__.py b/google/cloud/spanner_admin_database_v1/__init__.py
index a6272a0ea2..d7fddf0236 100644
--- a/google/cloud/spanner_admin_database_v1/__init__.py
+++ b/google/cloud/spanner_admin_database_v1/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,34 +13,62 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from google.cloud.spanner_admin_database_v1 import gapic_version as package_version
+
+__version__ = package_version.__version__
+
from .services.database_admin import DatabaseAdminClient
from .services.database_admin import DatabaseAdminAsyncClient
from .types.backup import Backup
from .types.backup import BackupInfo
+from .types.backup import BackupInstancePartition
+from .types.backup import CopyBackupEncryptionConfig
+from .types.backup import CopyBackupMetadata
+from .types.backup import CopyBackupRequest
from .types.backup import CreateBackupEncryptionConfig
from .types.backup import CreateBackupMetadata
from .types.backup import CreateBackupRequest
from .types.backup import DeleteBackupRequest
+from .types.backup import FullBackupSpec
from .types.backup import GetBackupRequest
+from .types.backup import IncrementalBackupSpec
from .types.backup import ListBackupOperationsRequest
from .types.backup import ListBackupOperationsResponse
from .types.backup import ListBackupsRequest
from .types.backup import ListBackupsResponse
from .types.backup import UpdateBackupRequest
+from .types.backup_schedule import BackupSchedule
+from .types.backup_schedule import BackupScheduleSpec
+from .types.backup_schedule import CreateBackupScheduleRequest
+from .types.backup_schedule import CrontabSpec
+from .types.backup_schedule import DeleteBackupScheduleRequest
+from .types.backup_schedule import GetBackupScheduleRequest
+from .types.backup_schedule import ListBackupSchedulesRequest
+from .types.backup_schedule import ListBackupSchedulesResponse
+from .types.backup_schedule import UpdateBackupScheduleRequest
from .types.common import EncryptionConfig
from .types.common import EncryptionInfo
from .types.common import OperationProgress
+from .types.common import DatabaseDialect
+from .types.spanner_database_admin import AddSplitPointsRequest
+from .types.spanner_database_admin import AddSplitPointsResponse
from .types.spanner_database_admin import CreateDatabaseMetadata
from .types.spanner_database_admin import CreateDatabaseRequest
from .types.spanner_database_admin import Database
+from .types.spanner_database_admin import DatabaseRole
+from .types.spanner_database_admin import DdlStatementActionInfo
from .types.spanner_database_admin import DropDatabaseRequest
from .types.spanner_database_admin import GetDatabaseDdlRequest
from .types.spanner_database_admin import GetDatabaseDdlResponse
from .types.spanner_database_admin import GetDatabaseRequest
+from .types.spanner_database_admin import InternalUpdateGraphOperationRequest
+from .types.spanner_database_admin import InternalUpdateGraphOperationResponse
from .types.spanner_database_admin import ListDatabaseOperationsRequest
from .types.spanner_database_admin import ListDatabaseOperationsResponse
+from .types.spanner_database_admin import ListDatabaseRolesRequest
+from .types.spanner_database_admin import ListDatabaseRolesResponse
from .types.spanner_database_admin import ListDatabasesRequest
from .types.spanner_database_admin import ListDatabasesResponse
from .types.spanner_database_admin import OptimizeRestoredDatabaseMetadata
@@ -48,35 +76,61 @@
from .types.spanner_database_admin import RestoreDatabaseMetadata
from .types.spanner_database_admin import RestoreDatabaseRequest
from .types.spanner_database_admin import RestoreInfo
+from .types.spanner_database_admin import SplitPoints
from .types.spanner_database_admin import UpdateDatabaseDdlMetadata
from .types.spanner_database_admin import UpdateDatabaseDdlRequest
+from .types.spanner_database_admin import UpdateDatabaseMetadata
+from .types.spanner_database_admin import UpdateDatabaseRequest
from .types.spanner_database_admin import RestoreSourceType
__all__ = (
"DatabaseAdminAsyncClient",
+ "AddSplitPointsRequest",
+ "AddSplitPointsResponse",
"Backup",
"BackupInfo",
+ "BackupInstancePartition",
+ "BackupSchedule",
+ "BackupScheduleSpec",
+ "CopyBackupEncryptionConfig",
+ "CopyBackupMetadata",
+ "CopyBackupRequest",
"CreateBackupEncryptionConfig",
"CreateBackupMetadata",
"CreateBackupRequest",
+ "CreateBackupScheduleRequest",
"CreateDatabaseMetadata",
"CreateDatabaseRequest",
+ "CrontabSpec",
"Database",
"DatabaseAdminClient",
+ "DatabaseDialect",
+ "DatabaseRole",
+ "DdlStatementActionInfo",
"DeleteBackupRequest",
+ "DeleteBackupScheduleRequest",
"DropDatabaseRequest",
"EncryptionConfig",
"EncryptionInfo",
+ "FullBackupSpec",
"GetBackupRequest",
+ "GetBackupScheduleRequest",
"GetDatabaseDdlRequest",
"GetDatabaseDdlResponse",
"GetDatabaseRequest",
+ "IncrementalBackupSpec",
+ "InternalUpdateGraphOperationRequest",
+ "InternalUpdateGraphOperationResponse",
"ListBackupOperationsRequest",
"ListBackupOperationsResponse",
+ "ListBackupSchedulesRequest",
+ "ListBackupSchedulesResponse",
"ListBackupsRequest",
"ListBackupsResponse",
"ListDatabaseOperationsRequest",
"ListDatabaseOperationsResponse",
+ "ListDatabaseRolesRequest",
+ "ListDatabaseRolesResponse",
"ListDatabasesRequest",
"ListDatabasesResponse",
"OperationProgress",
@@ -86,7 +140,11 @@
"RestoreDatabaseRequest",
"RestoreInfo",
"RestoreSourceType",
+ "SplitPoints",
"UpdateBackupRequest",
+ "UpdateBackupScheduleRequest",
"UpdateDatabaseDdlMetadata",
"UpdateDatabaseDdlRequest",
+ "UpdateDatabaseMetadata",
+ "UpdateDatabaseRequest",
)
diff --git a/google/cloud/spanner_admin_database_v1/gapic_metadata.json b/google/cloud/spanner_admin_database_v1/gapic_metadata.json
index 1460097dc3..027a4f612b 100644
--- a/google/cloud/spanner_admin_database_v1/gapic_metadata.json
+++ b/google/cloud/spanner_admin_database_v1/gapic_metadata.json
@@ -10,11 +10,26 @@
"grpc": {
"libraryClient": "DatabaseAdminClient",
"rpcs": {
+ "AddSplitPoints": {
+ "methods": [
+ "add_split_points"
+ ]
+ },
+ "CopyBackup": {
+ "methods": [
+ "copy_backup"
+ ]
+ },
"CreateBackup": {
"methods": [
"create_backup"
]
},
+ "CreateBackupSchedule": {
+ "methods": [
+ "create_backup_schedule"
+ ]
+ },
"CreateDatabase": {
"methods": [
"create_database"
@@ -25,6 +40,11 @@
"delete_backup"
]
},
+ "DeleteBackupSchedule": {
+ "methods": [
+ "delete_backup_schedule"
+ ]
+ },
"DropDatabase": {
"methods": [
"drop_database"
@@ -35,6 +55,11 @@
"get_backup"
]
},
+ "GetBackupSchedule": {
+ "methods": [
+ "get_backup_schedule"
+ ]
+ },
"GetDatabase": {
"methods": [
"get_database"
@@ -50,11 +75,21 @@
"get_iam_policy"
]
},
+ "InternalUpdateGraphOperation": {
+ "methods": [
+ "internal_update_graph_operation"
+ ]
+ },
"ListBackupOperations": {
"methods": [
"list_backup_operations"
]
},
+ "ListBackupSchedules": {
+ "methods": [
+ "list_backup_schedules"
+ ]
+ },
"ListBackups": {
"methods": [
"list_backups"
@@ -65,6 +100,11 @@
"list_database_operations"
]
},
+ "ListDatabaseRoles": {
+ "methods": [
+ "list_database_roles"
+ ]
+ },
"ListDatabases": {
"methods": [
"list_databases"
@@ -90,6 +130,16 @@
"update_backup"
]
},
+ "UpdateBackupSchedule": {
+ "methods": [
+ "update_backup_schedule"
+ ]
+ },
+ "UpdateDatabase": {
+ "methods": [
+ "update_database"
+ ]
+ },
"UpdateDatabaseDdl": {
"methods": [
"update_database_ddl"
@@ -100,11 +150,26 @@
"grpc-async": {
"libraryClient": "DatabaseAdminAsyncClient",
"rpcs": {
+ "AddSplitPoints": {
+ "methods": [
+ "add_split_points"
+ ]
+ },
+ "CopyBackup": {
+ "methods": [
+ "copy_backup"
+ ]
+ },
"CreateBackup": {
"methods": [
"create_backup"
]
},
+ "CreateBackupSchedule": {
+ "methods": [
+ "create_backup_schedule"
+ ]
+ },
"CreateDatabase": {
"methods": [
"create_database"
@@ -115,6 +180,11 @@
"delete_backup"
]
},
+ "DeleteBackupSchedule": {
+ "methods": [
+ "delete_backup_schedule"
+ ]
+ },
"DropDatabase": {
"methods": [
"drop_database"
@@ -125,6 +195,11 @@
"get_backup"
]
},
+ "GetBackupSchedule": {
+ "methods": [
+ "get_backup_schedule"
+ ]
+ },
"GetDatabase": {
"methods": [
"get_database"
@@ -140,11 +215,21 @@
"get_iam_policy"
]
},
+ "InternalUpdateGraphOperation": {
+ "methods": [
+ "internal_update_graph_operation"
+ ]
+ },
"ListBackupOperations": {
"methods": [
"list_backup_operations"
]
},
+ "ListBackupSchedules": {
+ "methods": [
+ "list_backup_schedules"
+ ]
+ },
"ListBackups": {
"methods": [
"list_backups"
@@ -155,6 +240,11 @@
"list_database_operations"
]
},
+ "ListDatabaseRoles": {
+ "methods": [
+ "list_database_roles"
+ ]
+ },
"ListDatabases": {
"methods": [
"list_databases"
@@ -180,6 +270,156 @@
"update_backup"
]
},
+ "UpdateBackupSchedule": {
+ "methods": [
+ "update_backup_schedule"
+ ]
+ },
+ "UpdateDatabase": {
+ "methods": [
+ "update_database"
+ ]
+ },
+ "UpdateDatabaseDdl": {
+ "methods": [
+ "update_database_ddl"
+ ]
+ }
+ }
+ },
+ "rest": {
+ "libraryClient": "DatabaseAdminClient",
+ "rpcs": {
+ "AddSplitPoints": {
+ "methods": [
+ "add_split_points"
+ ]
+ },
+ "CopyBackup": {
+ "methods": [
+ "copy_backup"
+ ]
+ },
+ "CreateBackup": {
+ "methods": [
+ "create_backup"
+ ]
+ },
+ "CreateBackupSchedule": {
+ "methods": [
+ "create_backup_schedule"
+ ]
+ },
+ "CreateDatabase": {
+ "methods": [
+ "create_database"
+ ]
+ },
+ "DeleteBackup": {
+ "methods": [
+ "delete_backup"
+ ]
+ },
+ "DeleteBackupSchedule": {
+ "methods": [
+ "delete_backup_schedule"
+ ]
+ },
+ "DropDatabase": {
+ "methods": [
+ "drop_database"
+ ]
+ },
+ "GetBackup": {
+ "methods": [
+ "get_backup"
+ ]
+ },
+ "GetBackupSchedule": {
+ "methods": [
+ "get_backup_schedule"
+ ]
+ },
+ "GetDatabase": {
+ "methods": [
+ "get_database"
+ ]
+ },
+ "GetDatabaseDdl": {
+ "methods": [
+ "get_database_ddl"
+ ]
+ },
+ "GetIamPolicy": {
+ "methods": [
+ "get_iam_policy"
+ ]
+ },
+ "InternalUpdateGraphOperation": {
+ "methods": [
+ "internal_update_graph_operation"
+ ]
+ },
+ "ListBackupOperations": {
+ "methods": [
+ "list_backup_operations"
+ ]
+ },
+ "ListBackupSchedules": {
+ "methods": [
+ "list_backup_schedules"
+ ]
+ },
+ "ListBackups": {
+ "methods": [
+ "list_backups"
+ ]
+ },
+ "ListDatabaseOperations": {
+ "methods": [
+ "list_database_operations"
+ ]
+ },
+ "ListDatabaseRoles": {
+ "methods": [
+ "list_database_roles"
+ ]
+ },
+ "ListDatabases": {
+ "methods": [
+ "list_databases"
+ ]
+ },
+ "RestoreDatabase": {
+ "methods": [
+ "restore_database"
+ ]
+ },
+ "SetIamPolicy": {
+ "methods": [
+ "set_iam_policy"
+ ]
+ },
+ "TestIamPermissions": {
+ "methods": [
+ "test_iam_permissions"
+ ]
+ },
+ "UpdateBackup": {
+ "methods": [
+ "update_backup"
+ ]
+ },
+ "UpdateBackupSchedule": {
+ "methods": [
+ "update_backup_schedule"
+ ]
+ },
+ "UpdateDatabase": {
+ "methods": [
+ "update_database"
+ ]
+ },
"UpdateDatabaseDdl": {
"methods": [
"update_database_ddl"
diff --git a/setup.cfg b/google/cloud/spanner_admin_database_v1/gapic_version.py
similarity index 78%
rename from setup.cfg
rename to google/cloud/spanner_admin_database_v1/gapic_version.py
index c3a2b39f65..fa3f4c040d 100644
--- a/setup.cfg
+++ b/google/cloud/spanner_admin_database_v1/gapic_version.py
@@ -1,19 +1,16 @@
# -*- coding: utf-8 -*-
-#
-# Copyright 2020 Google LLC
+# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# https://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-# Generated by synthtool. DO NOT EDIT!
-[bdist_wheel]
-universal = 1
+#
+__version__ = "3.58.0" # {x-release-please-version}
diff --git a/google/cloud/spanner_admin_database_v1/services/__init__.py b/google/cloud/spanner_admin_database_v1/services/__init__.py
index 4de65971c2..cbf94b283c 100644
--- a/google/cloud/spanner_admin_database_v1/services/__init__.py
+++ b/google/cloud/spanner_admin_database_v1/services/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/__init__.py b/google/cloud/spanner_admin_database_v1/services/database_admin/__init__.py
index abe449ebfa..580a7ed2a2 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/__init__.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py
index d9178c81a4..0e08065a7d 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,29 +13,55 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+import logging as std_logging
from collections import OrderedDict
-import functools
import re
-from typing import Dict, Sequence, Tuple, Type, Union
-import pkg_resources
-
-import google.api_core.client_options as ClientOptions # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
+from typing import (
+ Dict,
+ Callable,
+ Mapping,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+)
+import uuid
+
+from google.cloud.spanner_admin_database_v1 import gapic_version as package_version
+
+from google.api_core.client_options import ClientOptions
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry_async as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
+import google.protobuf
+
+
+try:
+ OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.spanner_admin_database_v1.services.database_admin import pagers
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as gsad_backup_schedule,
+)
from google.cloud.spanner_admin_database_v1.types import common
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
@@ -43,23 +69,42 @@
from .transports.grpc_asyncio import DatabaseAdminGrpcAsyncIOTransport
from .client import DatabaseAdminClient
+try:
+ from google.api_core import client_logging # type: ignore
+
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = std_logging.getLogger(__name__)
+
class DatabaseAdminAsyncClient:
"""Cloud Spanner Database Admin API
- The Cloud Spanner Database Admin API can be used to create,
- drop, and list databases. It also enables updating the schema of
- pre-existing databases. It can be also used to create, delete
- and list backups for a database and to restore from an existing
- backup.
+
+ The Cloud Spanner Database Admin API can be used to:
+
+ - create, drop, and list databases
+ - update the schema of pre-existing databases
+ - create, delete, copy and list backups for a database
+ - restore a database from an existing backup
"""
_client: DatabaseAdminClient
+ # Copy defaults from the synchronous client for use here.
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
DEFAULT_ENDPOINT = DatabaseAdminClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = DatabaseAdminClient.DEFAULT_MTLS_ENDPOINT
+ _DEFAULT_ENDPOINT_TEMPLATE = DatabaseAdminClient._DEFAULT_ENDPOINT_TEMPLATE
+ _DEFAULT_UNIVERSE = DatabaseAdminClient._DEFAULT_UNIVERSE
backup_path = staticmethod(DatabaseAdminClient.backup_path)
parse_backup_path = staticmethod(DatabaseAdminClient.parse_backup_path)
+ backup_schedule_path = staticmethod(DatabaseAdminClient.backup_schedule_path)
+ parse_backup_schedule_path = staticmethod(
+ DatabaseAdminClient.parse_backup_schedule_path
+ )
crypto_key_path = staticmethod(DatabaseAdminClient.crypto_key_path)
parse_crypto_key_path = staticmethod(DatabaseAdminClient.parse_crypto_key_path)
crypto_key_version_path = staticmethod(DatabaseAdminClient.crypto_key_version_path)
@@ -68,8 +113,16 @@ class DatabaseAdminAsyncClient:
)
database_path = staticmethod(DatabaseAdminClient.database_path)
parse_database_path = staticmethod(DatabaseAdminClient.parse_database_path)
+ database_role_path = staticmethod(DatabaseAdminClient.database_role_path)
+ parse_database_role_path = staticmethod(
+ DatabaseAdminClient.parse_database_role_path
+ )
instance_path = staticmethod(DatabaseAdminClient.instance_path)
parse_instance_path = staticmethod(DatabaseAdminClient.parse_instance_path)
+ instance_partition_path = staticmethod(DatabaseAdminClient.instance_partition_path)
+ parse_instance_partition_path = staticmethod(
+ DatabaseAdminClient.parse_instance_partition_path
+ )
common_billing_account_path = staticmethod(
DatabaseAdminClient.common_billing_account_path
)
@@ -128,6 +181,42 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
from_service_account_json = from_service_account_file
+ @classmethod
+ def get_mtls_endpoint_and_cert_source(
+ cls, client_options: Optional[ClientOptions] = None
+ ):
+ """Return the API endpoint and client cert source for mutual TLS.
+
+ The client cert source is determined in the following order:
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+ client cert source is None.
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+ default client cert source exists, use the default one; otherwise the client cert
+ source is None.
+
+ The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
+ default mTLS endpoint; if the environment variable is "never", use the default API
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+ use the default API endpoint.
+
+ More details can be found at https://google.aip.dev/auth/4114.
+
+ Args:
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
+ in this method.
+
+ Returns:
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+ client cert source to use.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+ """
+ return DatabaseAdminClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
+
@property
def transport(self) -> DatabaseAdminTransport:
"""Returns the transport used by the client instance.
@@ -137,19 +226,38 @@ def transport(self) -> DatabaseAdminTransport:
"""
return self._client.transport
- get_transport_class = functools.partial(
- type(DatabaseAdminClient).get_transport_class, type(DatabaseAdminClient)
- )
+ @property
+ def api_endpoint(self):
+ """Return the API endpoint used by the client instance.
+
+ Returns:
+ str: The API endpoint used by the client instance.
+ """
+ return self._client._api_endpoint
+
+ @property
+ def universe_domain(self) -> str:
+ """Return the universe domain used by the client instance.
+
+ Returns:
+ str: The universe domain used
+ by the client instance.
+ """
+ return self._client._universe_domain
+
+ get_transport_class = DatabaseAdminClient.get_transport_class
def __init__(
self,
*,
- credentials: ga_credentials.Credentials = None,
- transport: Union[str, DatabaseAdminTransport] = "grpc_asyncio",
- client_options: ClientOptions = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ transport: Optional[
+ Union[str, DatabaseAdminTransport, Callable[..., DatabaseAdminTransport]]
+ ] = "grpc_asyncio",
+ client_options: Optional[ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
- """Instantiates the database admin client.
+ """Instantiates the database admin async client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
@@ -157,26 +265,43 @@ def __init__(
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- transport (Union[str, ~.DatabaseAdminTransport]): The
- transport to use. If set to None, a transport is chosen
- automatically.
- client_options (ClientOptions): Custom options for the client. It
- won't take effect if a ``transport`` instance is provided.
- (1) The ``api_endpoint`` property can be used to override the
- default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
- environment variable can also be used to override the endpoint:
+ transport (Optional[Union[str,DatabaseAdminTransport,Callable[..., DatabaseAdminTransport]]]):
+ The transport to use, or a Callable that constructs and returns a new transport to use.
+ If a Callable is given, it will be called with the same set of initialization
+ arguments as used in the DatabaseAdminTransport constructor.
+ If set to None, a transport is chosen automatically.
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
+ Custom options for the client.
+
+ 1. The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client when ``transport`` is
+ not explicitly provided. Only if this property is not set and
+ ``transport`` was not explicitly provided, the endpoint is
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
+            variable, which can have one of the following values:
"always" (always use the default mTLS endpoint), "never" (always
- use the default regular endpoint) and "auto" (auto switch to the
- default mTLS endpoint if client certificate is present, this is
- the default value). However, the ``api_endpoint`` property takes
- precedence if provided.
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ use the default regular endpoint) and "auto" (auto-switch to the
+ default mTLS endpoint if client certificate is present; this is
+ the default value).
+
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
- to provide client certificate for mutual TLS transport. If
+ to provide a client certificate for mTLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
+ 3. The ``universe_domain`` property can be used to override the
+ default "googleapis.com" universe. Note that ``api_endpoint``
+ property still takes precedence; and ``universe_domain`` is
+ currently not supported for mTLS.
+
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
@@ -188,19 +313,70 @@ def __init__(
client_info=client_info,
)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ std_logging.DEBUG
+ ): # pragma: NO COVER
+ _LOGGER.debug(
+ "Created client `google.spanner.admin.database_v1.DatabaseAdminAsyncClient`.",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "universeDomain": getattr(
+ self._client._transport._credentials, "universe_domain", ""
+ ),
+ "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}",
+ "credentialsInfo": getattr(
+ self.transport._credentials, "get_cred_info", lambda: None
+ )(),
+ }
+ if hasattr(self._client._transport, "_credentials")
+ else {
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "credentialsType": None,
+ },
+ )
+
async def list_databases(
self,
- request: spanner_database_admin.ListDatabasesRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.ListDatabasesRequest, dict]
+ ] = None,
*,
- parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListDatabasesAsyncPager:
r"""Lists Cloud Spanner databases.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_list_databases():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListDatabasesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_databases(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.ListDatabasesRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.ListDatabasesRequest, dict]]):
The request object. The request for
[ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
parent (:class:`str`):
@@ -211,32 +387,40 @@ async def list_databases(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabasesAsyncPager:
The response for
- [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
+ [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.ListDatabasesRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.ListDatabasesRequest):
+ request = spanner_database_admin.ListDatabasesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -245,21 +429,9 @@ async def list_databases(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.list_databases,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_databases
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -267,13 +439,26 @@ async def list_databases(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListDatabasesAsyncPager(
- method=rpc, request=request, response=response, metadata=metadata,
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
# Done; return the response.
@@ -281,13 +466,15 @@ async def list_databases(
async def create_database(
self,
- request: spanner_database_admin.CreateDatabaseRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.CreateDatabaseRequest, dict]
+ ] = None,
*,
- parent: str = None,
- create_statement: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ create_statement: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation_async.AsyncOperation:
r"""Creates a new Cloud Spanner database and starts to prepare it
for serving. The returned [long-running
@@ -300,8 +487,39 @@ async def create_database(
is [Database][google.spanner.admin.database.v1.Database], if
successful.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_create_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.CreateDatabaseRequest(
+ parent="parent_value",
+ create_statement="create_statement_value",
+ )
+
+ # Make the request
+ operation = client.create_database(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.CreateDatabaseRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.CreateDatabaseRequest, dict]]):
The request object. The request for
[CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
parent (:class:`str`):
@@ -324,11 +542,13 @@ async def create_database(
This corresponds to the ``create_statement`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation_async.AsyncOperation:
@@ -340,16 +560,22 @@ async def create_database(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, create_statement])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, create_statement]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.CreateDatabaseRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.CreateDatabaseRequest):
+ request = spanner_database_admin.CreateDatabaseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -360,11 +586,9 @@ async def create_database(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.create_database,
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.create_database
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -372,8 +596,16 @@ async def create_database(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Wrap the response in an operation future.
response = operation_async.from_gapic(
@@ -388,17 +620,45 @@ async def create_database(
async def get_database(
self,
- request: spanner_database_admin.GetDatabaseRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.GetDatabaseRequest, dict]
+ ] = None,
*,
- name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> spanner_database_admin.Database:
r"""Gets the state of a Cloud Spanner database.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_get_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.GetDatabaseRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_database(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.GetDatabaseRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.GetDatabaseRequest, dict]]):
The request object. The request for
[GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
name (:class:`str`):
@@ -409,27 +669,35 @@ async def get_database(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.Database:
A Cloud Spanner database.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.GetDatabaseRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.GetDatabaseRequest):
+ request = spanner_database_admin.GetDatabaseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -438,21 +706,9 @@ async def get_database(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.get_database,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_database
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -460,21 +716,212 @@ async def get_database(
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def update_database(
+ self,
+ request: Optional[
+ Union[spanner_database_admin.UpdateDatabaseRequest, dict]
+ ] = None,
+ *,
+ database: Optional[spanner_database_admin.Database] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Updates a Cloud Spanner database. The returned [long-running
+ operation][google.longrunning.Operation] can be used to track
+ the progress of updating the database. If the named database
+ does not exist, returns ``NOT_FOUND``.
+
+ While the operation is pending:
+
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field is set to true.
+ - Cancelling the operation is best-effort. If the cancellation
+ succeeds, the operation metadata's
+ [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
+ is set, the updates are reverted, and the operation terminates
+ with a ``CANCELLED`` status.
+ - New UpdateDatabase requests will return a
+ ``FAILED_PRECONDITION`` error until the pending operation is
+ done (returns successfully or with error).
+ - Reading the database via the API continues to give the
+ pre-request values.
+
+ Upon completion of the returned operation:
+
+ - The new values are in effect and readable via the API.
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field becomes false.
+
+ The returned [long-running
+ operation][google.longrunning.Operation] will have a name of the
+ format
+ ``projects//instances//databases//operations/``
+ and can be used to track the database modification. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Database][google.spanner.admin.database.v1.Database], if
+ successful.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_update_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ database = spanner_admin_database_v1.Database()
+ database.name = "name_value"
+
+ request = spanner_admin_database_v1.UpdateDatabaseRequest(
+ database=database,
+ )
+
+ # Make the request
+ operation = client.update_database(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.UpdateDatabaseRequest, dict]]):
+ The request object. The request for
+ [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
+ database (:class:`google.cloud.spanner_admin_database_v1.types.Database`):
+ Required. The database to update. The ``name`` field of
+ the database is of the form
+ ``projects//instances//databases/``.
+
+ This corresponds to the ``database`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+ Required. The list of fields to update. Currently, only
+ ``enable_drop_protection`` field can be updated.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.spanner_admin_database_v1.types.Database`
+ A Cloud Spanner database.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.UpdateDatabaseRequest):
+ request = spanner_database_admin.UpdateDatabaseRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if database is not None:
+ request.database = database
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.update_database
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("database.name", request.database.name),)
+ ),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ spanner_database_admin.Database,
+ metadata_type=spanner_database_admin.UpdateDatabaseMetadata,
+ )
# Done; return the response.
return response
async def update_database_ddl(
self,
- request: spanner_database_admin.UpdateDatabaseDdlRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.UpdateDatabaseDdlRequest, dict]
+ ] = None,
*,
- database: str = None,
- statements: Sequence[str] = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ database: Optional[str] = None,
+ statements: Optional[MutableSequence[str]] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation_async.AsyncOperation:
r"""Updates the schema of a Cloud Spanner database by
creating/altering/dropping tables, columns, indexes, etc. The
@@ -486,21 +933,52 @@ async def update_database_ddl(
[UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
The operation has no response.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_update_database_ddl():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.UpdateDatabaseDdlRequest(
+ database="database_value",
+ statements=['statements_value1', 'statements_value2'],
+ )
+
+ # Make the request
+ operation = client.update_database_ddl(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.UpdateDatabaseDdlRequest`):
- The request object. Enqueues the given DDL statements to
- be applied, in order but not necessarily all at once, to
- the database schema at some point (or points) in the
- future. The server checks that the statements are
- executable (syntactically valid, name tables that exist,
- etc.) before enqueueing them, but they may still fail
- upon
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.UpdateDatabaseDdlRequest, dict]]):
+ The request object. Enqueues the given DDL statements to be applied, in
+ order but not necessarily all at once, to the database
+ schema at some point (or points) in the future. The
+ server checks that the statements are executable
+ (syntactically valid, name tables that exist, etc.)
+ before enqueueing them, but they may still fail upon
later execution (e.g., if a statement from another batch
of statements is applied first and it conflicts in some
way, or if there is some data-related problem like a
- `NULL` value in a column to which `NOT NULL` would be
- added). If a statement fails, all subsequent statements
- in the batch are automatically cancelled.
+ ``NULL`` value in a column to which ``NOT NULL`` would
+ be added). If a statement fails, all subsequent
+ statements in the batch are automatically cancelled.
+
Each batch of statements is assigned a name which can be
used with the
[Operations][google.longrunning.Operations] API to
@@ -512,18 +990,20 @@ async def update_database_ddl(
This corresponds to the ``database`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- statements (:class:`Sequence[str]`):
+ statements (:class:`MutableSequence[str]`):
Required. DDL statements to be
applied to the database.
This corresponds to the ``statements`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation_async.AsyncOperation:
@@ -540,21 +1020,24 @@ async def update_database_ddl(
}
- The JSON representation for Empty is empty JSON
- object {}.
-
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([database, statements])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, statements]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.UpdateDatabaseDdlRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.UpdateDatabaseDdlRequest):
+ request = spanner_database_admin.UpdateDatabaseDdlRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -565,21 +1048,9 @@ async def update_database_ddl(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.update_database_ddl,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.update_database_ddl
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -587,8 +1058,16 @@ async def update_database_ddl(
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Wrap the response in an operation future.
response = operation_async.from_gapic(
@@ -603,19 +1082,45 @@ async def update_database_ddl(
async def drop_database(
self,
- request: spanner_database_admin.DropDatabaseRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.DropDatabaseRequest, dict]
+ ] = None,
*,
- database: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ database: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> None:
r"""Drops (aka deletes) a Cloud Spanner database. Completed backups
for the database will be retained according to their
- ``expire_time``.
+ ``expire_time``. Note: Cloud Spanner might continue to accept
+ requests for a few seconds after the database has been deleted.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_drop_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.DropDatabaseRequest(
+ database="database_value",
+ )
+
+ # Make the request
+ await client.drop_database(request=request)
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.DropDatabaseRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.DropDatabaseRequest, dict]]):
The request object. The request for
[DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
database (:class:`str`):
@@ -623,23 +1128,31 @@ async def drop_database(
This corresponds to the ``database`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([database])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.DropDatabaseRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.DropDatabaseRequest):
+ request = spanner_database_admin.DropDatabaseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -648,21 +1161,9 @@ async def drop_database(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.drop_database,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.drop_database
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -670,27 +1171,61 @@ async def drop_database(
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
await rpc(
- request, retry=retry, timeout=timeout, metadata=metadata,
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
async def get_database_ddl(
self,
- request: spanner_database_admin.GetDatabaseDdlRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.GetDatabaseDdlRequest, dict]
+ ] = None,
*,
- database: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ database: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> spanner_database_admin.GetDatabaseDdlResponse:
r"""Returns the schema of a Cloud Spanner database as a list of
formatted DDL statements. This method does not show pending
schema updates, those may be queried using the
[Operations][google.longrunning.Operations] API.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_get_database_ddl():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.GetDatabaseDdlRequest(
+ database="database_value",
+ )
+
+ # Make the request
+ response = await client.get_database_ddl(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlRequest, dict]]):
The request object. The request for
[GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
database (:class:`str`):
@@ -701,29 +1236,37 @@ async def get_database_ddl(
This corresponds to the ``database`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlResponse:
The response for
- [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
+ [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([database])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.GetDatabaseDdlRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.GetDatabaseDdlRequest):
+ request = spanner_database_admin.GetDatabaseDdlRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -732,21 +1275,9 @@ async def get_database_ddl(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.get_database_ddl,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_database_ddl
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -754,20 +1285,28 @@ async def get_database_ddl(
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
async def set_iam_policy(
self,
- request: iam_policy_pb2.SetIamPolicyRequest = None,
+ request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None,
*,
- resource: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ resource: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> policy_pb2.Policy:
r"""Sets the access control policy on a database or backup resource.
Replaces any existing policy.
@@ -779,10 +1318,36 @@ async def set_iam_policy(
permission on
[resource][google.iam.v1.SetIamPolicyRequest.resource].
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ async def sample_set_iam_policy():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.SetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = await client.set_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.iam.v1.iam_policy_pb2.SetIamPolicyRequest`):
- The request object. Request message for `SetIamPolicy`
- method.
+ request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]):
+ The request object. Request message for ``SetIamPolicy`` method.
resource (:class:`str`):
REQUIRED: The resource for which the
policy is being specified. See the
@@ -792,95 +1357,73 @@ async def set_iam_policy(
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.iam.v1.policy_pb2.Policy:
- Defines an Identity and Access Management (IAM) policy. It is used to
- specify access control policies for Cloud Platform
- resources.
+ An Identity and Access Management (IAM) policy, which specifies access
+ controls for Google Cloud resources.
A Policy is a collection of bindings. A binding binds
- one or more members to a single role. Members can be
- user accounts, service accounts, Google groups, and
- domains (such as G Suite). A role is a named list of
- permissions (defined by IAM or configured by users).
- A binding can optionally specify a condition, which
- is a logic expression that further constrains the
- role binding based on attributes about the request
- and/or target resource.
-
- **JSON Example**
-
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": ["user:eve@example.com"],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ]
+ one or more members, or principals, to a single role.
+ Principals can be user accounts, service accounts,
+ Google groups, and domains (such as G Suite). A role
+ is a named list of permissions; each role can be an
+ IAM predefined role or a user-created custom role.
- }
+ For some types of Google Cloud resources, a binding
+ can also specify a condition, which is a logical
+ expression that allows access to a resource only if
+ the expression evaluates to true. A condition can add
+ constraints based on attributes of the request, the
+ resource, or both. To learn which resources support
+ conditions in their IAM policies, see the [IAM
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
- **YAML Example**
+ **JSON example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z')
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
+
+ **YAML example:**
+
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
- [IAM developer's
- guide](\ https://cloud.google.com/iam/docs).
+ [IAM
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([resource])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.SetIamPolicyRequest(**request)
elif not request:
- request = iam_policy_pb2.SetIamPolicyRequest(resource=resource,)
+ request = iam_policy_pb2.SetIamPolicyRequest(resource=resource)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.set_iam_policy,
- default_timeout=30.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.set_iam_policy
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -888,20 +1431,28 @@ async def set_iam_policy(
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
async def get_iam_policy(
self,
- request: iam_policy_pb2.GetIamPolicyRequest = None,
+ request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None,
*,
- resource: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ resource: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> policy_pb2.Policy:
r"""Gets the access control policy for a database or backup
resource. Returns an empty policy if a database or backup exists
@@ -914,10 +1465,36 @@ async def get_iam_policy(
permission on
[resource][google.iam.v1.GetIamPolicyRequest.resource].
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ async def sample_get_iam_policy():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.GetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = await client.get_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.iam.v1.iam_policy_pb2.GetIamPolicyRequest`):
- The request object. Request message for `GetIamPolicy`
- method.
+ request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]):
+ The request object. Request message for ``GetIamPolicy`` method.
resource (:class:`str`):
REQUIRED: The resource for which the
policy is being requested. See the
@@ -927,105 +1504,73 @@ async def get_iam_policy(
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.iam.v1.policy_pb2.Policy:
- Defines an Identity and Access Management (IAM) policy. It is used to
- specify access control policies for Cloud Platform
- resources.
+ An Identity and Access Management (IAM) policy, which specifies access
+ controls for Google Cloud resources.
A Policy is a collection of bindings. A binding binds
- one or more members to a single role. Members can be
- user accounts, service accounts, Google groups, and
- domains (such as G Suite). A role is a named list of
- permissions (defined by IAM or configured by users).
- A binding can optionally specify a condition, which
- is a logic expression that further constrains the
- role binding based on attributes about the request
- and/or target resource.
-
- **JSON Example**
-
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": ["user:eve@example.com"],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ]
+ one or more members, or principals, to a single role.
+ Principals can be user accounts, service accounts,
+ Google groups, and domains (such as G Suite). A role
+ is a named list of permissions; each role can be an
+ IAM predefined role or a user-created custom role.
- }
+ For some types of Google Cloud resources, a binding
+ can also specify a condition, which is a logical
+ expression that allows access to a resource only if
+ the expression evaluates to true. A condition can add
+ constraints based on attributes of the request, the
+ resource, or both. To learn which resources support
+ conditions in their IAM policies, see the [IAM
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+
+ **JSON example:**
- **YAML Example**
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z')
+ **YAML example:**
+
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
- [IAM developer's
- guide](\ https://cloud.google.com/iam/docs).
+ [IAM
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([resource])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.GetIamPolicyRequest(**request)
elif not request:
- request = iam_policy_pb2.GetIamPolicyRequest(resource=resource,)
+ request = iam_policy_pb2.GetIamPolicyRequest(resource=resource)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.get_iam_policy,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=30.0,
- ),
- default_timeout=30.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_iam_policy
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1033,21 +1578,29 @@ async def get_iam_policy(
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
async def test_iam_permissions(
self,
- request: iam_policy_pb2.TestIamPermissionsRequest = None,
+ request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None,
*,
- resource: str = None,
- permissions: Sequence[str] = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ resource: Optional[str] = None,
+ permissions: Optional[MutableSequence[str]] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
r"""Returns permissions that the caller has on the specified
database or backup resource.
@@ -1060,10 +1613,37 @@ async def test_iam_permissions(
in a NOT_FOUND error if the user has ``spanner.backups.list``
permission on the containing instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ async def sample_test_iam_permissions():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.TestIamPermissionsRequest(
+ resource="resource_value",
+ permissions=['permissions_value1', 'permissions_value2'],
+ )
+
+ # Make the request
+ response = await client.test_iam_permissions(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest`):
- The request object. Request message for
- `TestIamPermissions` method.
+ request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]):
+ The request object. Request message for ``TestIamPermissions`` method.
resource (:class:`str`):
REQUIRED: The resource for which the
policy detail is being requested. See
@@ -1073,7 +1653,7 @@ async def test_iam_permissions(
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- permissions (:class:`Sequence[str]`):
+ permissions (:class:`MutableSequence[str]`):
The set of permissions to check for the ``resource``.
Permissions with wildcards (such as '*' or 'storage.*')
are not allowed. For more information see `IAM
@@ -1082,42 +1662,45 @@ async def test_iam_permissions(
This corresponds to the ``permissions`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
Response message for TestIamPermissions method.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([resource, permissions])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource, permissions]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.TestIamPermissionsRequest(**request)
elif not request:
request = iam_policy_pb2.TestIamPermissionsRequest(
- resource=resource, permissions=permissions,
+ resource=resource, permissions=permissions
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.test_iam_permissions,
- default_timeout=30.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.test_iam_permissions
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1125,22 +1708,30 @@ async def test_iam_permissions(
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
async def create_backup(
self,
- request: gsad_backup.CreateBackupRequest = None,
+ request: Optional[Union[gsad_backup.CreateBackupRequest, dict]] = None,
*,
- parent: str = None,
- backup: gsad_backup.Backup = None,
- backup_id: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ backup: Optional[gsad_backup.Backup] = None,
+ backup_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation_async.AsyncOperation:
r"""Starts creating a new Cloud Spanner Backup. The returned backup
[long-running operation][google.longrunning.Operation] will have
@@ -1156,8 +1747,39 @@ async def create_backup(
backup creation per database. Backup creation of different
databases can run concurrently.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_create_backup():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.CreateBackupRequest(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ )
+
+ # Make the request
+ operation = client.create_backup(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.CreateBackupRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.CreateBackupRequest, dict]]):
The request object. The request for
[CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
parent (:class:`str`):
@@ -1186,11 +1808,13 @@ async def create_backup(
This corresponds to the ``backup_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation_async.AsyncOperation:
@@ -1202,16 +1826,22 @@ async def create_backup(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, backup, backup_id])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, backup, backup_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = gsad_backup.CreateBackupRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, gsad_backup.CreateBackupRequest):
+ request = gsad_backup.CreateBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -1224,11 +1854,9 @@ async def create_backup(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.create_backup,
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.create_backup
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1236,8 +1864,16 @@ async def create_backup(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Wrap the response in an operation future.
response = operation_async.from_gapic(
@@ -1250,21 +1886,229 @@ async def create_backup(
# Done; return the response.
return response
- async def get_backup(
+ async def copy_backup(
self,
- request: backup.GetBackupRequest = None,
+ request: Optional[Union[backup.CopyBackupRequest, dict]] = None,
*,
- name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> backup.Backup:
- r"""Gets metadata on a pending or completed
- [Backup][google.spanner.admin.database.v1.Backup].
-
- Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.GetBackupRequest`):
- The request object. The request for
+ parent: Optional[str] = None,
+ backup_id: Optional[str] = None,
+ source_backup: Optional[str] = None,
+ expire_time: Optional[timestamp_pb2.Timestamp] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Starts copying a Cloud Spanner Backup. The returned backup
+ [long-running operation][google.longrunning.Operation] will have
+ a name of the format
+ ``projects//instances//backups//operations/``
+ and can be used to track copying of the backup. The operation is
+ associated with the destination backup. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Backup][google.spanner.admin.database.v1.Backup], if
+ successful. Cancelling the returned operation will stop the
+ copying and delete the destination backup. Concurrent CopyBackup
+ requests can run on the same source backup.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_copy_backup():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.CopyBackupRequest(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ source_backup="source_backup_value",
+ )
+
+ # Make the request
+ operation = client.copy_backup(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.CopyBackupRequest, dict]]):
+ The request object. The request for
+ [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
+ parent (:class:`str`):
+ Required. The name of the destination instance that will
+ contain the backup copy. Values are of the form:
+ ``projects//instances/``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_id (:class:`str`):
+ Required. The id of the backup copy. The ``backup_id``
+ appended to ``parent`` forms the full backup_uri of the
+ form
+ ``projects//instances//backups/``.
+
+ This corresponds to the ``backup_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ source_backup (:class:`str`):
+ Required. The source backup to be copied. The source
+ backup needs to be in READY state for it to be copied.
+ Once CopyBackup is in progress, the source backup cannot
+ be deleted or cleaned up on expiration until CopyBackup
+ is finished. Values are of the form:
+ ``projects//instances//backups/``.
+
+ This corresponds to the ``source_backup`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ expire_time (:class:`google.protobuf.timestamp_pb2.Timestamp`):
+ Required. The expiration time of the backup in
+ microsecond granularity. The expiration time must be at
+ least 6 hours and at most 366 days from the
+ ``create_time`` of the source backup. Once the
+ ``expire_time`` has passed, the backup is eligible to be
+ automatically deleted by Cloud Spanner to free the
+ resources used by the backup.
+
+ This corresponds to the ``expire_time`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.spanner_admin_database_v1.types.Backup`
+ A backup of a Cloud Spanner database.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, backup_id, source_backup, expire_time]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup.CopyBackupRequest):
+ request = backup.CopyBackupRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if backup_id is not None:
+ request.backup_id = backup_id
+ if source_backup is not None:
+ request.source_backup = source_backup
+ if expire_time is not None:
+ request.expire_time = expire_time
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.copy_backup
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ backup.Backup,
+ metadata_type=backup.CopyBackupMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_backup(
+ self,
+ request: Optional[Union[backup.GetBackupRequest, dict]] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> backup.Backup:
+ r"""Gets metadata on a pending or completed
+ [Backup][google.spanner.admin.database.v1.Backup].
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_get_backup():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.GetBackupRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_backup(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.GetBackupRequest, dict]]):
+ The request object. The request for
[GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
name (:class:`str`):
Required. Name of the backup. Values are of the form
@@ -1273,27 +2117,35 @@ async def get_backup(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.Backup:
A backup of a Cloud Spanner database.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = backup.GetBackupRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup.GetBackupRequest):
+ request = backup.GetBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -1302,21 +2154,9 @@ async def get_backup(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.get_backup,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_backup
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1324,27 +2164,60 @@ async def get_backup(
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
async def update_backup(
self,
- request: gsad_backup.UpdateBackupRequest = None,
+ request: Optional[Union[gsad_backup.UpdateBackupRequest, dict]] = None,
*,
- backup: gsad_backup.Backup = None,
- update_mask: field_mask_pb2.FieldMask = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ backup: Optional[gsad_backup.Backup] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> gsad_backup.Backup:
r"""Updates a pending or completed
[Backup][google.spanner.admin.database.v1.Backup].
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_update_backup():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.UpdateBackupRequest(
+ )
+
+ # Make the request
+ response = await client.update_backup(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.UpdateBackupRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.UpdateBackupRequest, dict]]):
The request object. The request for
[UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
backup (:class:`google.cloud.spanner_admin_database_v1.types.Backup`):
@@ -1353,7 +2226,7 @@ async def update_backup(
required. Other fields are ignored. Update is only
supported for the following fields:
- - ``backup.expire_time``.
+ - ``backup.expire_time``.
This corresponds to the ``backup`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -1370,27 +2243,35 @@ async def update_backup(
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.Backup:
A backup of a Cloud Spanner database.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([backup, update_mask])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [backup, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = gsad_backup.UpdateBackupRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, gsad_backup.UpdateBackupRequest):
+ request = gsad_backup.UpdateBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -1401,21 +2282,9 @@ async def update_backup(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.update_backup,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.update_backup
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1425,26 +2294,57 @@ async def update_backup(
),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
async def delete_backup(
self,
- request: backup.DeleteBackupRequest = None,
+ request: Optional[Union[backup.DeleteBackupRequest, dict]] = None,
*,
- name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> None:
r"""Deletes a pending or completed
[Backup][google.spanner.admin.database.v1.Backup].
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_delete_backup():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.DeleteBackupRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_backup(request=request)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.DeleteBackupRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.DeleteBackupRequest, dict]]):
The request object. The request for
[DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup].
name (:class:`str`):
@@ -1455,23 +2355,31 @@ async def delete_backup(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = backup.DeleteBackupRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup.DeleteBackupRequest):
+ request = backup.DeleteBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -1480,21 +2388,9 @@ async def delete_backup(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.delete_backup,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.delete_backup
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1502,26 +2398,59 @@ async def delete_backup(
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
await rpc(
- request, retry=retry, timeout=timeout, metadata=metadata,
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
async def list_backups(
self,
- request: backup.ListBackupsRequest = None,
+ request: Optional[Union[backup.ListBackupsRequest, dict]] = None,
*,
- parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListBackupsAsyncPager:
r"""Lists completed and pending backups. Backups returned are
ordered by ``create_time`` in descending order, starting from
the most recent ``create_time``.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_list_backups():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListBackupsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_backups(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.ListBackupsRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.ListBackupsRequest, dict]]):
The request object. The request for
[ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
parent (:class:`str`):
@@ -1531,32 +2460,40 @@ async def list_backups(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupsAsyncPager:
The response for
- [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
+ [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = backup.ListBackupsRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup.ListBackupsRequest):
+ request = backup.ListBackupsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -1565,21 +2502,9 @@ async def list_backups(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.list_backups,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_backups
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1587,13 +2512,26 @@ async def list_backups(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListBackupsAsyncPager(
- method=rpc, request=request, response=response, metadata=metadata,
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
# Done; return the response.
@@ -1601,14 +2539,16 @@ async def list_backups(
async def restore_database(
self,
- request: spanner_database_admin.RestoreDatabaseRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.RestoreDatabaseRequest, dict]
+ ] = None,
*,
- parent: str = None,
- database_id: str = None,
- backup: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ database_id: Optional[str] = None,
+ backup: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation_async.AsyncOperation:
r"""Create a new database by restoring from a completed backup. The
new database must be in the same project and in an instance with
@@ -1630,8 +2570,40 @@ async def restore_database(
without waiting for the optimize operation associated with the
first restore to complete.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_restore_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.RestoreDatabaseRequest(
+ backup="backup_value",
+ parent="parent_value",
+ database_id="database_id_value",
+ )
+
+ # Make the request
+ operation = client.restore_database(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.RestoreDatabaseRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.RestoreDatabaseRequest, dict]]):
The request object. The request for
[RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
parent (:class:`str`):
@@ -1662,11 +2634,13 @@ async def restore_database(
This corresponds to the ``backup`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation_async.AsyncOperation:
@@ -1678,16 +2652,22 @@ async def restore_database(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, database_id, backup])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, database_id, backup]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.RestoreDatabaseRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.RestoreDatabaseRequest):
+ request = spanner_database_admin.RestoreDatabaseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -1700,11 +2680,9 @@ async def restore_database(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.restore_database,
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.restore_database
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1712,8 +2690,16 @@ async def restore_database(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Wrap the response in an operation future.
response = operation_async.from_gapic(
@@ -1728,12 +2714,14 @@ async def restore_database(
async def list_database_operations(
self,
- request: spanner_database_admin.ListDatabaseOperationsRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.ListDatabaseOperationsRequest, dict]
+ ] = None,
*,
- parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListDatabaseOperationsAsyncPager:
r"""Lists database
[longrunning-operations][google.longrunning.Operation]. A
@@ -1746,8 +2734,35 @@ async def list_database_operations(
completed/failed/canceled within the last 7 days, and pending
operations.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_list_database_operations():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListDatabaseOperationsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_database_operations(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsRequest, dict]]):
The request object. The request for
[ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
parent (:class:`str`):
@@ -1758,11 +2773,13 @@ async def list_database_operations(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabaseOperationsAsyncPager:
@@ -1774,16 +2791,24 @@ async def list_database_operations(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.ListDatabaseOperationsRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, spanner_database_admin.ListDatabaseOperationsRequest
+ ):
+ request = spanner_database_admin.ListDatabaseOperationsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -1792,21 +2817,9 @@ async def list_database_operations(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.list_database_operations,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_database_operations
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1814,13 +2827,26 @@ async def list_database_operations(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListDatabaseOperationsAsyncPager(
- method=rpc, request=request, response=response, metadata=metadata,
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
# Done; return the response.
@@ -1828,12 +2854,12 @@ async def list_database_operations(
async def list_backup_operations(
self,
- request: backup.ListBackupOperationsRequest = None,
+ request: Optional[Union[backup.ListBackupOperationsRequest, dict]] = None,
*,
- parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListBackupOperationsAsyncPager:
r"""Lists the backup [long-running
operations][google.longrunning.Operation] in the given instance.
@@ -1848,8 +2874,35 @@ async def list_backup_operations(
``operation.metadata.value.progress.start_time`` in descending
order starting from the most recently started operation.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_list_backup_operations():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListBackupOperationsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_backup_operations(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_database_v1.types.ListBackupOperationsRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.ListBackupOperationsRequest, dict]]):
The request object. The request for
[ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
parent (:class:`str`):
@@ -1860,11 +2913,13 @@ async def list_backup_operations(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupOperationsAsyncPager:
@@ -1876,16 +2931,22 @@ async def list_backup_operations(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = backup.ListBackupOperationsRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup.ListBackupOperationsRequest):
+ request = backup.ListBackupOperationsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -1894,21 +2955,9 @@ async def list_backup_operations(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.list_backup_operations,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_backup_operations
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1916,27 +2965,1254 @@ async def list_backup_operations(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListBackupOperationsAsyncPager(
- method=rpc, request=request, response=response, metadata=metadata,
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
# Done; return the response.
return response
+ async def list_database_roles(
+ self,
+ request: Optional[
+ Union[spanner_database_admin.ListDatabaseRolesRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> pagers.ListDatabaseRolesAsyncPager:
+ r"""Lists Cloud Spanner database roles.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_list_database_roles():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListDatabaseRolesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_database_roles(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
-try:
- DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
- gapic_version=pkg_resources.get_distribution(
- "google-cloud-spanner-admin-database",
- ).version,
- )
-except pkg_resources.DistributionNotFound:
- DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesRequest, dict]]):
+ The request object. The request for
+ [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+ parent (:class:`str`):
+ Required. The database whose roles should be listed.
+ Values are of the form
+ ``projects/<project>/instances/<instance>/databases/<database>``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabaseRolesAsyncPager:
+ The response for
+ [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.ListDatabaseRolesRequest):
+ request = spanner_database_admin.ListDatabaseRolesRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_database_roles
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListDatabaseRolesAsyncPager(
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def add_split_points(
+ self,
+ request: Optional[
+ Union[spanner_database_admin.AddSplitPointsRequest, dict]
+ ] = None,
+ *,
+ database: Optional[str] = None,
+ split_points: Optional[
+ MutableSequence[spanner_database_admin.SplitPoints]
+ ] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.AddSplitPointsResponse:
+ r"""Adds split points to specified tables, indexes of a
+ database.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_add_split_points():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.AddSplitPointsRequest(
+ database="database_value",
+ )
+
+ # Make the request
+ response = await client.add_split_points(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.AddSplitPointsRequest, dict]]):
+ The request object. The request for
+ [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+ database (:class:`str`):
+ Required. The database on whose tables/indexes split
+ points are to be added. Values are of the form
+ ``projects/<project>/instances/<instance>/databases/<database>``.
+
+ This corresponds to the ``database`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ split_points (:class:`MutableSequence[google.cloud.spanner_admin_database_v1.types.SplitPoints]`):
+ Required. The split points to add.
+ This corresponds to the ``split_points`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.AddSplitPointsResponse:
+ The response for
+ [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, split_points]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.AddSplitPointsRequest):
+ request = spanner_database_admin.AddSplitPointsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if database is not None:
+ request.database = database
+ if split_points:
+ request.split_points.extend(split_points)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.add_split_points
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def create_backup_schedule(
+ self,
+ request: Optional[
+ Union[gsad_backup_schedule.CreateBackupScheduleRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ backup_schedule: Optional[gsad_backup_schedule.BackupSchedule] = None,
+ backup_schedule_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> gsad_backup_schedule.BackupSchedule:
+ r"""Creates a new backup schedule.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_create_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.CreateBackupScheduleRequest(
+ parent="parent_value",
+ backup_schedule_id="backup_schedule_id_value",
+ )
+
+ # Make the request
+ response = await client.create_backup_schedule(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.CreateBackupScheduleRequest, dict]]):
+ The request object. The request for
+ [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule].
+ parent (:class:`str`):
+ Required. The name of the database
+ that this backup schedule applies to.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_schedule (:class:`google.cloud.spanner_admin_database_v1.types.BackupSchedule`):
+ Required. The backup schedule to
+ create.
+
+ This corresponds to the ``backup_schedule`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_schedule_id (:class:`str`):
+ Required. The Id to use for the backup schedule. The
+ ``backup_schedule_id`` appended to ``parent`` forms the
+ full backup schedule name of the form
+ ``projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>``.
+
+ This corresponds to the ``backup_schedule_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, backup_schedule, backup_schedule_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, gsad_backup_schedule.CreateBackupScheduleRequest):
+ request = gsad_backup_schedule.CreateBackupScheduleRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if backup_schedule is not None:
+ request.backup_schedule = backup_schedule
+ if backup_schedule_id is not None:
+ request.backup_schedule_id = backup_schedule_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.create_backup_schedule
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_backup_schedule(
+ self,
+ request: Optional[Union[backup_schedule.GetBackupScheduleRequest, dict]] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> backup_schedule.BackupSchedule:
+ r"""Gets backup schedule for the input schedule name.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_get_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.GetBackupScheduleRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_backup_schedule(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.GetBackupScheduleRequest, dict]]):
+ The request object. The request for
+ [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule].
+ name (:class:`str`):
+ Required. The name of the schedule to retrieve. Values
+ are of the form
+ ``projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup_schedule.GetBackupScheduleRequest):
+ request = backup_schedule.GetBackupScheduleRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_backup_schedule
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def update_backup_schedule(
+ self,
+ request: Optional[
+ Union[gsad_backup_schedule.UpdateBackupScheduleRequest, dict]
+ ] = None,
+ *,
+ backup_schedule: Optional[gsad_backup_schedule.BackupSchedule] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> gsad_backup_schedule.BackupSchedule:
+ r"""Updates a backup schedule.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_update_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.UpdateBackupScheduleRequest(
+ )
+
+ # Make the request
+ response = await client.update_backup_schedule(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.UpdateBackupScheduleRequest, dict]]):
+ The request object. The request for
+ [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule].
+ backup_schedule (:class:`google.cloud.spanner_admin_database_v1.types.BackupSchedule`):
+ Required. The backup schedule to update.
+ ``backup_schedule.name``, and the fields to be updated
+ as specified by ``update_mask`` are required. Other
+ fields are ignored.
+
+ This corresponds to the ``backup_schedule`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+ Required. A mask specifying which
+ fields in the BackupSchedule resource
+ should be updated. This mask is relative
+ to the BackupSchedule resource, not to
+ the request message. The field mask must
+ always be specified; this prevents any
+ future fields from being erased
+ accidentally.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [backup_schedule, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, gsad_backup_schedule.UpdateBackupScheduleRequest):
+ request = gsad_backup_schedule.UpdateBackupScheduleRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if backup_schedule is not None:
+ request.backup_schedule = backup_schedule
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.update_backup_schedule
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("backup_schedule.name", request.backup_schedule.name),)
+ ),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def delete_backup_schedule(
+ self,
+ request: Optional[
+ Union[backup_schedule.DeleteBackupScheduleRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Deletes a backup schedule.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_delete_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.DeleteBackupScheduleRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_backup_schedule(request=request)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.DeleteBackupScheduleRequest, dict]]):
+ The request object. The request for
+ [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule].
+ name (:class:`str`):
+ Required. The name of the schedule to delete. Values are
+ of the form
+ ``projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup_schedule.DeleteBackupScheduleRequest):
+ request = backup_schedule.DeleteBackupScheduleRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.delete_backup_schedule
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ async def list_backup_schedules(
+ self,
+ request: Optional[
+ Union[backup_schedule.ListBackupSchedulesRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> pagers.ListBackupSchedulesAsyncPager:
+ r"""Lists all the backup schedules for the database.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_list_backup_schedules():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListBackupSchedulesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_backup_schedules(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesRequest, dict]]):
+ The request object. The request for
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+ parent (:class:`str`):
+ Required. Database is the parent
+ resource whose backup schedules should
+ be listed. Values are of the form
+ projects/<project>/instances/<instance>/databases/<database>
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupSchedulesAsyncPager:
+ The response for
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup_schedule.ListBackupSchedulesRequest):
+ request = backup_schedule.ListBackupSchedulesRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_backup_schedules
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListBackupSchedulesAsyncPager(
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def internal_update_graph_operation(
+ self,
+ request: Optional[
+ Union[spanner_database_admin.InternalUpdateGraphOperationRequest, dict]
+ ] = None,
+ *,
+ database: Optional[str] = None,
+ operation_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.InternalUpdateGraphOperationResponse:
+ r"""This is an internal API called by Spanner Graph jobs.
+ You should never need to call this API directly.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_internal_update_graph_operation():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.InternalUpdateGraphOperationRequest(
+ database="database_value",
+ operation_id="operation_id_value",
+ vm_identity_token="vm_identity_token_value",
+ )
+
+ # Make the request
+ response = await client.internal_update_graph_operation(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.InternalUpdateGraphOperationRequest, dict]]):
+ The request object. Internal request proto, do not use
+ directly.
+ database (:class:`str`):
+ Internal field, do not use directly.
+ This corresponds to the ``database`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ operation_id (:class:`str`):
+ Internal field, do not use directly.
+ This corresponds to the ``operation_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.InternalUpdateGraphOperationResponse:
+ Internal response proto, do not use
+ directly.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, operation_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, spanner_database_admin.InternalUpdateGraphOperationRequest
+ ):
+ request = spanner_database_admin.InternalUpdateGraphOperationRequest(
+ request
+ )
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if database is not None:
+ request.database = database
+ if operation_id is not None:
+ request.operation_id = operation_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.internal_update_graph_operation
+ ]
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def list_operations(
+ self,
+ request: Optional[operations_pb2.ListOperationsRequest] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.ListOperationsResponse:
+ r"""Lists operations that match the specified filter in the request.
+
+ Args:
+ request (:class:`~.operations_pb2.ListOperationsRequest`):
+ The request object. Request message for
+ `ListOperations` method.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ Returns:
+ ~.operations_pb2.ListOperationsResponse:
+ Response message for ``ListOperations`` method.
+ """
+ # Create or coerce a protobuf request object.
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = operations_pb2.ListOperationsRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self.transport._wrapped_methods[self._client._transport.list_operations]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_operation(
+ self,
+ request: Optional[operations_pb2.GetOperationRequest] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.Operation:
+ r"""Gets the latest state of a long-running operation.
+
+ Args:
+ request (:class:`~.operations_pb2.GetOperationRequest`):
+ The request object. Request message for
+ `GetOperation` method.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ Returns:
+ ~.operations_pb2.Operation:
+ An ``Operation`` object.
+ """
+ # Create or coerce a protobuf request object.
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = operations_pb2.GetOperationRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self.transport._wrapped_methods[self._client._transport.get_operation]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def delete_operation(
+ self,
+ request: Optional[operations_pb2.DeleteOperationRequest] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Deletes a long-running operation.
+
+ This method indicates that the client is no longer interested
+ in the operation result. It does not cancel the operation.
+ If the server doesn't support this method, it returns
+ `google.rpc.Code.UNIMPLEMENTED`.
+
+ Args:
+ request (:class:`~.operations_pb2.DeleteOperationRequest`):
+ The request object. Request message for
+ `DeleteOperation` method.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ Returns:
+ None
+ """
+ # Create or coerce a protobuf request object.
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = operations_pb2.DeleteOperationRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self.transport._wrapped_methods[self._client._transport.delete_operation]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ async def cancel_operation(
+ self,
+ request: Optional[operations_pb2.CancelOperationRequest] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Starts asynchronous cancellation on a long-running operation.
+
+ The server makes a best effort to cancel the operation, but success
+ is not guaranteed. If the server doesn't support this method, it returns
+ `google.rpc.Code.UNIMPLEMENTED`.
+
+ Args:
+ request (:class:`~.operations_pb2.CancelOperationRequest`):
+ The request object. Request message for
+ `CancelOperation` method.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ Returns:
+ None
+ """
+ # Create or coerce a protobuf request object.
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = operations_pb2.CancelOperationRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self.transport._wrapped_methods[self._client._transport.cancel_operation]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ async def __aenter__(self) -> "DatabaseAdminAsyncClient":
+ return self
+
+ async def __aexit__(self, exc_type, exc, tb):
+ await self.transport.close()
+
+
+DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=package_version.__version__
+)
+
+if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER
+ DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__
__all__ = ("DatabaseAdminAsyncClient",)
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py
index 1100d160c5..5f85aa39b1 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,38 +14,77 @@
# limitations under the License.
#
from collections import OrderedDict
-from distutils import util
+from http import HTTPStatus
+import json
+import logging as std_logging
import os
import re
-from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
-import pkg_resources
-
-from google.api_core import client_options as client_options_lib # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
+from typing import (
+ Dict,
+ Callable,
+ Mapping,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+ cast,
+)
+import uuid
+import warnings
+
+from google.cloud.spanner_admin_database_v1 import gapic_version as package_version
+
+from google.api_core import client_options as client_options_lib
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
+import google.protobuf
+
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
+
+try:
+ from google.api_core import client_logging # type: ignore
+
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = std_logging.getLogger(__name__)
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.spanner_admin_database_v1.services.database_admin import pagers
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as gsad_backup_schedule,
+)
from google.cloud.spanner_admin_database_v1.types import common
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import DatabaseAdminGrpcTransport
from .transports.grpc_asyncio import DatabaseAdminGrpcAsyncIOTransport
+from .transports.rest import DatabaseAdminRestTransport
class DatabaseAdminClientMeta(type):
@@ -59,8 +98,12 @@ class DatabaseAdminClientMeta(type):
_transport_registry = OrderedDict() # type: Dict[str, Type[DatabaseAdminTransport]]
_transport_registry["grpc"] = DatabaseAdminGrpcTransport
_transport_registry["grpc_asyncio"] = DatabaseAdminGrpcAsyncIOTransport
+ _transport_registry["rest"] = DatabaseAdminRestTransport
- def get_transport_class(cls, label: str = None,) -> Type[DatabaseAdminTransport]:
+ def get_transport_class(
+ cls,
+ label: Optional[str] = None,
+ ) -> Type[DatabaseAdminTransport]:
"""Returns an appropriate transport class.
Args:
@@ -81,11 +124,13 @@ def get_transport_class(cls, label: str = None,) -> Type[DatabaseAdminTransport]
class DatabaseAdminClient(metaclass=DatabaseAdminClientMeta):
"""Cloud Spanner Database Admin API
- The Cloud Spanner Database Admin API can be used to create,
- drop, and list databases. It also enables updating the schema of
- pre-existing databases. It can be also used to create, delete
- and list backups for a database and to restore from an existing
- backup.
+
+ The Cloud Spanner Database Admin API can be used to:
+
+ - create, drop, and list databases
+ - update the schema of pre-existing databases
+ - create, delete, copy and list backups for a database
+ - restore a database from an existing backup
"""
@staticmethod
@@ -118,11 +163,15 @@ def _get_default_mtls_endpoint(api_endpoint):
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
DEFAULT_ENDPOINT = "spanner.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
+ _DEFAULT_ENDPOINT_TEMPLATE = "spanner.{UNIVERSE_DOMAIN}"
+ _DEFAULT_UNIVERSE = "googleapis.com"
+
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
@@ -171,10 +220,16 @@ def transport(self) -> DatabaseAdminTransport:
return self._transport
@staticmethod
- def backup_path(project: str, instance: str, backup: str,) -> str:
+ def backup_path(
+ project: str,
+ instance: str,
+ backup: str,
+ ) -> str:
"""Returns a fully-qualified backup string."""
return "projects/{project}/instances/{instance}/backups/{backup}".format(
- project=project, instance=instance, backup=backup,
+ project=project,
+ instance=instance,
+ backup=backup,
)
@staticmethod
@@ -186,9 +241,36 @@ def parse_backup_path(path: str) -> Dict[str, str]:
)
return m.groupdict() if m else {}
+ @staticmethod
+ def backup_schedule_path(
+ project: str,
+ instance: str,
+ database: str,
+ schedule: str,
+ ) -> str:
+ """Returns a fully-qualified backup_schedule string."""
+ return "projects/{project}/instances/{instance}/databases/{database}/backupSchedules/{schedule}".format(
+ project=project,
+ instance=instance,
+ database=database,
+ schedule=schedule,
+ )
+
+ @staticmethod
+ def parse_backup_schedule_path(path: str) -> Dict[str, str]:
+ """Parses a backup_schedule path into its component segments."""
+ m = re.match(
+ r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/databases/(?P<database>.+?)/backupSchedules/(?P<schedule>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
@staticmethod
def crypto_key_path(
- project: str, location: str, key_ring: str, crypto_key: str,
+ project: str,
+ location: str,
+ key_ring: str,
+ crypto_key: str,
) -> str:
"""Returns a fully-qualified crypto_key string."""
return "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format(
@@ -234,10 +316,16 @@ def parse_crypto_key_version_path(path: str) -> Dict[str, str]:
return m.groupdict() if m else {}
@staticmethod
- def database_path(project: str, instance: str, database: str,) -> str:
+ def database_path(
+ project: str,
+ instance: str,
+ database: str,
+ ) -> str:
"""Returns a fully-qualified database string."""
return "projects/{project}/instances/{instance}/databases/{database}".format(
- project=project, instance=instance, database=database,
+ project=project,
+ instance=instance,
+ database=database,
)
@staticmethod
@@ -250,10 +338,38 @@ def parse_database_path(path: str) -> Dict[str, str]:
return m.groupdict() if m else {}
@staticmethod
- def instance_path(project: str, instance: str,) -> str:
+ def database_role_path(
+ project: str,
+ instance: str,
+ database: str,
+ role: str,
+ ) -> str:
+ """Returns a fully-qualified database_role string."""
+ return "projects/{project}/instances/{instance}/databases/{database}/databaseRoles/{role}".format(
+ project=project,
+ instance=instance,
+ database=database,
+ role=role,
+ )
+
+ @staticmethod
+ def parse_database_role_path(path: str) -> Dict[str, str]:
+ """Parses a database_role path into its component segments."""
+ m = re.match(
+ r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/databases/(?P<database>.+?)/databaseRoles/(?P<role>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def instance_path(
+ project: str,
+ instance: str,
+ ) -> str:
"""Returns a fully-qualified instance string."""
return "projects/{project}/instances/{instance}".format(
- project=project, instance=instance,
+ project=project,
+ instance=instance,
)
@staticmethod
@@ -263,7 +379,31 @@ def parse_instance_path(path: str) -> Dict[str, str]:
return m.groupdict() if m else {}
@staticmethod
- def common_billing_account_path(billing_account: str,) -> str:
+ def instance_partition_path(
+ project: str,
+ instance: str,
+ instance_partition: str,
+ ) -> str:
+ """Returns a fully-qualified instance_partition string."""
+ return "projects/{project}/instances/{instance}/instancePartitions/{instance_partition}".format(
+ project=project,
+ instance=instance,
+ instance_partition=instance_partition,
+ )
+
+ @staticmethod
+ def parse_instance_partition_path(path: str) -> Dict[str, str]:
+ """Parses a instance_partition path into its component segments."""
+ m = re.match(
+ r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/instancePartitions/(?P<instance_partition>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_billing_account_path(
+ billing_account: str,
+ ) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
@@ -276,9 +416,13 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
return m.groupdict() if m else {}
@staticmethod
- def common_folder_path(folder: str,) -> str:
+ def common_folder_path(
+ folder: str,
+ ) -> str:
"""Returns a fully-qualified folder string."""
- return "folders/{folder}".format(folder=folder,)
+ return "folders/{folder}".format(
+ folder=folder,
+ )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
@@ -287,9 +431,13 @@ def parse_common_folder_path(path: str) -> Dict[str, str]:
return m.groupdict() if m else {}
@staticmethod
- def common_organization_path(organization: str,) -> str:
+ def common_organization_path(
+ organization: str,
+ ) -> str:
"""Returns a fully-qualified organization string."""
- return "organizations/{organization}".format(organization=organization,)
+ return "organizations/{organization}".format(
+ organization=organization,
+ )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
@@ -298,9 +446,13 @@ def parse_common_organization_path(path: str) -> Dict[str, str]:
return m.groupdict() if m else {}
@staticmethod
- def common_project_path(project: str,) -> str:
+ def common_project_path(
+ project: str,
+ ) -> str:
"""Returns a fully-qualified project string."""
- return "projects/{project}".format(project=project,)
+ return "projects/{project}".format(
+ project=project,
+ )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
@@ -309,10 +461,14 @@ def parse_common_project_path(path: str) -> Dict[str, str]:
return m.groupdict() if m else {}
@staticmethod
- def common_location_path(project: str, location: str,) -> str:
+ def common_location_path(
+ project: str,
+ location: str,
+ ) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
- project=project, location=location,
+ project=project,
+ location=location,
)
@staticmethod
@@ -321,12 +477,251 @@ def parse_common_location_path(path: str) -> Dict[str, str]:
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
+ @classmethod
+ def get_mtls_endpoint_and_cert_source(
+ cls, client_options: Optional[client_options_lib.ClientOptions] = None
+ ):
+ """Deprecated. Return the API endpoint and client cert source for mutual TLS.
+
+ The client cert source is determined in the following order:
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+ client cert source is None.
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+ default client cert source exists, use the default one; otherwise the client cert
+ source is None.
+
+ The API endpoint is determined in the following order:
+ (1) if `client_options.api_endpoint` if provided, use the provided one.
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
+ default mTLS endpoint; if the environment variable is "never", use the default API
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+ use the default API endpoint.
+
+ More details can be found at https://google.aip.dev/auth/4114.
+
+ Args:
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
+ in this method.
+
+ Returns:
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+ client cert source to use.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+ """
+
+ warnings.warn(
+ "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.",
+ DeprecationWarning,
+ )
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+ use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+ if use_client_cert not in ("true", "false"):
+ raise ValueError(
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+ if use_mtls_endpoint not in ("auto", "never", "always"):
+ raise MutualTLSChannelError(
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Figure out the client cert source to use.
+ client_cert_source = None
+ if use_client_cert == "true":
+ if client_options.client_cert_source:
+ client_cert_source = client_options.client_cert_source
+ elif mtls.has_default_client_cert_source():
+ client_cert_source = mtls.default_client_cert_source()
+
+ # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None:
+ api_endpoint = client_options.api_endpoint
+ elif use_mtls_endpoint == "always" or (
+ use_mtls_endpoint == "auto" and client_cert_source
+ ):
+ api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
+ else:
+ api_endpoint = cls.DEFAULT_ENDPOINT
+
+ return api_endpoint, client_cert_source
+
+ @staticmethod
+ def _read_environment_variables():
+ """Returns the environment variables used by the client.
+
+ Returns:
+ Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE,
+ GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables.
+
+ Raises:
+ ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not
+ any of ["true", "false"].
+ google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT
+ is not any of ["auto", "never", "always"].
+ """
+ use_client_cert = os.getenv(
+ "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
+ ).lower()
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower()
+ universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN")
+ if use_client_cert not in ("true", "false"):
+ raise ValueError(
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+ if use_mtls_endpoint not in ("auto", "never", "always"):
+ raise MutualTLSChannelError(
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+ return use_client_cert == "true", use_mtls_endpoint, universe_domain_env
+
+ @staticmethod
+ def _get_client_cert_source(provided_cert_source, use_cert_flag):
+ """Return the client cert source to be used by the client.
+
+ Args:
+ provided_cert_source (bytes): The client certificate source provided.
+ use_cert_flag (bool): A flag indicating whether to use the client certificate.
+
+ Returns:
+ bytes or None: The client cert source to be used by the client.
+ """
+ client_cert_source = None
+ if use_cert_flag:
+ if provided_cert_source:
+ client_cert_source = provided_cert_source
+ elif mtls.has_default_client_cert_source():
+ client_cert_source = mtls.default_client_cert_source()
+ return client_cert_source
+
+ @staticmethod
+ def _get_api_endpoint(
+ api_override, client_cert_source, universe_domain, use_mtls_endpoint
+ ):
+ """Return the API endpoint used by the client.
+
+ Args:
+ api_override (str): The API endpoint override. If specified, this is always
+ the return value of this function and the other arguments are not used.
+ client_cert_source (bytes): The client certificate source used by the client.
+ universe_domain (str): The universe domain used by the client.
+ use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters.
+ Possible values are "always", "auto", or "never".
+
+ Returns:
+ str: The API endpoint to be used by the client.
+ """
+ if api_override is not None:
+ api_endpoint = api_override
+ elif use_mtls_endpoint == "always" or (
+ use_mtls_endpoint == "auto" and client_cert_source
+ ):
+ _default_universe = DatabaseAdminClient._DEFAULT_UNIVERSE
+ if universe_domain != _default_universe:
+ raise MutualTLSChannelError(
+ f"mTLS is not supported in any universe other than {_default_universe}."
+ )
+ api_endpoint = DatabaseAdminClient.DEFAULT_MTLS_ENDPOINT
+ else:
+ api_endpoint = DatabaseAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=universe_domain
+ )
+ return api_endpoint
+
+ @staticmethod
+ def _get_universe_domain(
+ client_universe_domain: Optional[str], universe_domain_env: Optional[str]
+ ) -> str:
+ """Return the universe domain used by the client.
+
+ Args:
+ client_universe_domain (Optional[str]): The universe domain configured via the client options.
+ universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable.
+
+ Returns:
+ str: The universe domain to be used by the client.
+
+ Raises:
+ ValueError: If the universe domain is an empty string.
+ """
+ universe_domain = DatabaseAdminClient._DEFAULT_UNIVERSE
+ if client_universe_domain is not None:
+ universe_domain = client_universe_domain
+ elif universe_domain_env is not None:
+ universe_domain = universe_domain_env
+ if len(universe_domain.strip()) == 0:
+ raise ValueError("Universe Domain cannot be an empty string.")
+ return universe_domain
+
+ def _validate_universe_domain(self):
+ """Validates client's and credentials' universe domains are consistent.
+
+ Returns:
+ bool: True iff the configured universe domain is valid.
+
+ Raises:
+ ValueError: If the configured universe domain is not valid.
+ """
+
+ # NOTE (b/349488459): universe validation is disabled until further notice.
+ return True
+
+ def _add_cred_info_for_auth_errors(
+ self, error: core_exceptions.GoogleAPICallError
+ ) -> None:
+ """Adds credential info string to error details for 401/403/404 errors.
+
+ Args:
+ error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info.
+ """
+ if error.code not in [
+ HTTPStatus.UNAUTHORIZED,
+ HTTPStatus.FORBIDDEN,
+ HTTPStatus.NOT_FOUND,
+ ]:
+ return
+
+ cred = self._transport._credentials
+
+ # get_cred_info is only available in google-auth>=2.35.0
+ if not hasattr(cred, "get_cred_info"):
+ return
+
+ # ignore the type check since pypy test fails when get_cred_info
+ # is not available
+ cred_info = cred.get_cred_info() # type: ignore
+ if cred_info and hasattr(error._details, "append"):
+ error._details.append(json.dumps(cred_info))
+
+ @property
+ def api_endpoint(self):
+ """Return the API endpoint used by the client instance.
+
+ Returns:
+ str: The API endpoint used by the client instance.
+ """
+ return self._api_endpoint
+
+ @property
+ def universe_domain(self) -> str:
+ """Return the universe domain used by the client instance.
+
+ Returns:
+ str: The universe domain used by the client instance.
+ """
+ return self._universe_domain
+
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
- transport: Union[str, DatabaseAdminTransport, None] = None,
- client_options: Optional[client_options_lib.ClientOptions] = None,
+ transport: Optional[
+ Union[str, DatabaseAdminTransport, Callable[..., DatabaseAdminTransport]]
+ ] = None,
+ client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the database admin client.
@@ -337,25 +732,37 @@ def __init__(
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- transport (Union[str, DatabaseAdminTransport]): The
- transport to use. If set to None, a transport is chosen
- automatically.
- client_options (google.api_core.client_options.ClientOptions): Custom options for the
- client. It won't take effect if a ``transport`` instance is provided.
- (1) The ``api_endpoint`` property can be used to override the
- default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
- environment variable can also be used to override the endpoint:
+ transport (Optional[Union[str,DatabaseAdminTransport,Callable[..., DatabaseAdminTransport]]]):
+ The transport to use, or a Callable that constructs and returns a new transport.
+ If a Callable is given, it will be called with the same set of initialization
+ arguments as used in the DatabaseAdminTransport constructor.
+ If set to None, a transport is chosen automatically.
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
+ Custom options for the client.
+
+ 1. The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client when ``transport`` is
+ not explicitly provided. Only if this property is not set and
+ ``transport`` was not explicitly provided, the endpoint is
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
+ variable, which have one of the following values:
"always" (always use the default mTLS endpoint), "never" (always
- use the default regular endpoint) and "auto" (auto switch to the
- default mTLS endpoint if client certificate is present, this is
- the default value). However, the ``api_endpoint`` property takes
- precedence if provided.
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ use the default regular endpoint) and "auto" (auto-switch to the
+ default mTLS endpoint if client certificate is present; this is
+ the default value).
+
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
- to provide client certificate for mutual TLS transport. If
+ to provide a client certificate for mTLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
+
+ 3. The ``universe_domain`` property can be used to override the
+ default "googleapis.com" universe. Note that the ``api_endpoint``
+ property still takes precedence; and ``universe_domain`` is
+ currently not supported for mTLS.
+
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
@@ -366,94 +773,167 @@ def __init__(
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
- if isinstance(client_options, dict):
- client_options = client_options_lib.from_dict(client_options)
- if client_options is None:
- client_options = client_options_lib.ClientOptions()
+ self._client_options = client_options
+ if isinstance(self._client_options, dict):
+ self._client_options = client_options_lib.from_dict(self._client_options)
+ if self._client_options is None:
+ self._client_options = client_options_lib.ClientOptions()
+ self._client_options = cast(
+ client_options_lib.ClientOptions, self._client_options
+ )
- # Create SSL credentials for mutual TLS if needed.
- use_client_cert = bool(
- util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+ universe_domain_opt = getattr(self._client_options, "universe_domain", None)
+
+ (
+ self._use_client_cert,
+ self._use_mtls_endpoint,
+ self._universe_domain_env,
+ ) = DatabaseAdminClient._read_environment_variables()
+ self._client_cert_source = DatabaseAdminClient._get_client_cert_source(
+ self._client_options.client_cert_source, self._use_client_cert
+ )
+ self._universe_domain = DatabaseAdminClient._get_universe_domain(
+ universe_domain_opt, self._universe_domain_env
)
+ self._api_endpoint = None # updated below, depending on `transport`
- client_cert_source_func = None
- is_mtls = False
- if use_client_cert:
- if client_options.client_cert_source:
- is_mtls = True
- client_cert_source_func = client_options.client_cert_source
- else:
- is_mtls = mtls.has_default_client_cert_source()
- if is_mtls:
- client_cert_source_func = mtls.default_client_cert_source()
- else:
- client_cert_source_func = None
+ # Initialize the universe domain validation.
+ self._is_universe_domain_valid = False
- # Figure out which api endpoint to use.
- if client_options.api_endpoint is not None:
- api_endpoint = client_options.api_endpoint
- else:
- use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
- if use_mtls_env == "never":
- api_endpoint = self.DEFAULT_ENDPOINT
- elif use_mtls_env == "always":
- api_endpoint = self.DEFAULT_MTLS_ENDPOINT
- elif use_mtls_env == "auto":
- if is_mtls:
- api_endpoint = self.DEFAULT_MTLS_ENDPOINT
- else:
- api_endpoint = self.DEFAULT_ENDPOINT
- else:
- raise MutualTLSChannelError(
- "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
- "values: never, auto, always"
- )
+ if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER
+ # Setup logging.
+ client_logging.initialize_logging()
+
+ api_key_value = getattr(self._client_options, "api_key", None)
+ if api_key_value and credentials:
+ raise ValueError(
+ "client_options.api_key and credentials are mutually exclusive"
+ )
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
- if isinstance(transport, DatabaseAdminTransport):
+ transport_provided = isinstance(transport, DatabaseAdminTransport)
+ if transport_provided:
# transport is a DatabaseAdminTransport instance.
- if credentials or client_options.credentials_file:
+ if credentials or self._client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
- if client_options.scopes:
+ if self._client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
- self._transport = transport
- else:
- Transport = type(self).get_transport_class(transport)
- self._transport = Transport(
+ self._transport = cast(DatabaseAdminTransport, transport)
+ self._api_endpoint = self._transport.host
+
+ self._api_endpoint = (
+ self._api_endpoint
+ or DatabaseAdminClient._get_api_endpoint(
+ self._client_options.api_endpoint,
+ self._client_cert_source,
+ self._universe_domain,
+ self._use_mtls_endpoint,
+ )
+ )
+
+ if not transport_provided:
+ import google.auth._default # type: ignore
+
+ if api_key_value and hasattr(
+ google.auth._default, "get_api_key_credentials"
+ ):
+ credentials = google.auth._default.get_api_key_credentials(
+ api_key_value
+ )
+
+ transport_init: Union[
+ Type[DatabaseAdminTransport], Callable[..., DatabaseAdminTransport]
+ ] = (
+ DatabaseAdminClient.get_transport_class(transport)
+ if isinstance(transport, str) or transport is None
+ else cast(Callable[..., DatabaseAdminTransport], transport)
+ )
+ # initialize with the provided callable or the passed in class
+ self._transport = transport_init(
credentials=credentials,
- credentials_file=client_options.credentials_file,
- host=api_endpoint,
- scopes=client_options.scopes,
- client_cert_source_for_mtls=client_cert_source_func,
- quota_project_id=client_options.quota_project_id,
+ credentials_file=self._client_options.credentials_file,
+ host=self._api_endpoint,
+ scopes=self._client_options.scopes,
+ client_cert_source_for_mtls=self._client_cert_source,
+ quota_project_id=self._client_options.quota_project_id,
client_info=client_info,
- always_use_jwt_access=(
- Transport == type(self).get_transport_class("grpc")
- or Transport == type(self).get_transport_class("grpc_asyncio")
- ),
+ always_use_jwt_access=True,
+ api_audience=self._client_options.api_audience,
)
+ if "async" not in str(self._transport):
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ std_logging.DEBUG
+ ): # pragma: NO COVER
+ _LOGGER.debug(
+ "Created client `google.spanner.admin.database_v1.DatabaseAdminClient`.",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "universeDomain": getattr(
+ self._transport._credentials, "universe_domain", ""
+ ),
+ "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}",
+ "credentialsInfo": getattr(
+ self.transport._credentials, "get_cred_info", lambda: None
+ )(),
+ }
+ if hasattr(self._transport, "_credentials")
+ else {
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "credentialsType": None,
+ },
+ )
+
def list_databases(
self,
- request: spanner_database_admin.ListDatabasesRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.ListDatabasesRequest, dict]
+ ] = None,
*,
- parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListDatabasesPager:
r"""Lists Cloud Spanner databases.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_list_databases():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListDatabasesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_databases(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
- request (google.cloud.spanner_admin_database_v1.types.ListDatabasesRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.ListDatabasesRequest, dict]):
The request object. The request for
[ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
parent (str):
@@ -467,32 +947,35 @@ def list_databases(
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabasesPager:
The response for
- [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
+ [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.ListDatabasesRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, spanner_database_admin.ListDatabasesRequest):
request = spanner_database_admin.ListDatabasesRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -510,13 +993,26 @@ def list_databases(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListDatabasesPager(
- method=rpc, request=request, response=response, metadata=metadata,
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
# Done; return the response.
@@ -524,13 +1020,15 @@ def list_databases(
def create_database(
self,
- request: spanner_database_admin.CreateDatabaseRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.CreateDatabaseRequest, dict]
+ ] = None,
*,
- parent: str = None,
- create_statement: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ create_statement: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation.Operation:
r"""Creates a new Cloud Spanner database and starts to prepare it
for serving. The returned [long-running
@@ -543,8 +1041,39 @@ def create_database(
is [Database][google.spanner.admin.database.v1.Database], if
successful.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_create_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.CreateDatabaseRequest(
+ parent="parent_value",
+ create_statement="create_statement_value",
+ )
+
+ # Make the request
+ operation = client.create_database(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
- request (google.cloud.spanner_admin_database_v1.types.CreateDatabaseRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.CreateDatabaseRequest, dict]):
The request object. The request for
[CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
parent (str):
@@ -570,8 +1099,10 @@ def create_database(
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation.Operation:
@@ -583,19 +1114,20 @@ def create_database(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, create_statement])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, create_statement]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.CreateDatabaseRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, spanner_database_admin.CreateDatabaseRequest):
request = spanner_database_admin.CreateDatabaseRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -615,8 +1147,16 @@ def create_database(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Wrap the response in an operation future.
response = operation.from_gapic(
@@ -631,17 +1171,45 @@ def create_database(
def get_database(
self,
- request: spanner_database_admin.GetDatabaseRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.GetDatabaseRequest, dict]
+ ] = None,
*,
- name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> spanner_database_admin.Database:
r"""Gets the state of a Cloud Spanner database.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_get_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.GetDatabaseRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_database(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (google.cloud.spanner_admin_database_v1.types.GetDatabaseRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.GetDatabaseRequest, dict]):
The request object. The request for
[GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
name (str):
@@ -655,27 +1223,30 @@ def get_database(
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.Database:
A Cloud Spanner database.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.GetDatabaseRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, spanner_database_admin.GetDatabaseRequest):
request = spanner_database_admin.GetDatabaseRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -693,70 +1264,291 @@ def get_database(
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
- def update_database_ddl(
+ def update_database(
self,
- request: spanner_database_admin.UpdateDatabaseDdlRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.UpdateDatabaseRequest, dict]
+ ] = None,
*,
- database: str = None,
- statements: Sequence[str] = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ database: Optional[spanner_database_admin.Database] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation.Operation:
- r"""Updates the schema of a Cloud Spanner database by
- creating/altering/dropping tables, columns, indexes, etc. The
- returned [long-running operation][google.longrunning.Operation]
- will have a name of the format
- ``/operations/`` and can be used to
- track execution of the schema change(s). The
+ r"""Updates a Cloud Spanner database. The returned [long-running
+ operation][google.longrunning.Operation] can be used to track
+ the progress of updating the database. If the named database
+ does not exist, returns ``NOT_FOUND``.
+
+ While the operation is pending:
+
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field is set to true.
+ - Cancelling the operation is best-effort. If the cancellation
+ succeeds, the operation metadata's
+ [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
+ is set, the updates are reverted, and the operation terminates
+ with a ``CANCELLED`` status.
+ - New UpdateDatabase requests will return a
+ ``FAILED_PRECONDITION`` error until the pending operation is
+ done (returns successfully or with error).
+ - Reading the database via the API continues to give the
+ pre-request values.
+
+ Upon completion of the returned operation:
+
+ - The new values are in effect and readable via the API.
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field becomes false.
+
+ The returned [long-running
+ operation][google.longrunning.Operation] will have a name of the
+ format
+ ``projects//instances//databases//operations/``
+ and can be used to track the database modification. The
[metadata][google.longrunning.Operation.metadata] field type is
- [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
- The operation has no response.
+ [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Database][google.spanner.admin.database.v1.Database], if
+ successful.
- Args:
- request (google.cloud.spanner_admin_database_v1.types.UpdateDatabaseDdlRequest):
- The request object. Enqueues the given DDL statements to
- be applied, in order but not necessarily all at once, to
- the database schema at some point (or points) in the
- future. The server checks that the statements are
- executable (syntactically valid, name tables that exist,
- etc.) before enqueueing them, but they may still fail
- upon
- later execution (e.g., if a statement from another batch
- of statements is applied first and it conflicts in some
- way, or if there is some data-related problem like a
- `NULL` value in a column to which `NOT NULL` would be
- added). If a statement fails, all subsequent statements
- in the batch are automatically cancelled.
- Each batch of statements is assigned a name which can be
- used with the
- [Operations][google.longrunning.Operations] API to
- monitor progress. See the
- [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id]
- field for more details.
- database (str):
- Required. The database to update.
- This corresponds to the ``database`` field
- on the ``request`` instance; if ``request`` is provided, this
- should not be set.
- statements (Sequence[str]):
- Required. DDL statements to be
- applied to the database.
+ .. code-block:: python
- This corresponds to the ``statements`` field
- on the ``request`` instance; if ``request`` is provided, this
- should not be set.
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_update_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ database = spanner_admin_database_v1.Database()
+ database.name = "name_value"
+
+ request = spanner_admin_database_v1.UpdateDatabaseRequest(
+ database=database,
+ )
+
+ # Make the request
+ operation = client.update_database(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.UpdateDatabaseRequest, dict]):
+ The request object. The request for
+ [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
+ database (google.cloud.spanner_admin_database_v1.types.Database):
+ Required. The database to update. The ``name`` field of
+ the database is of the form
+ ``projects//instances//databases/``.
+
+ This corresponds to the ``database`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. The list of fields to update. Currently, only
+ ``enable_drop_protection`` field can be updated.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.spanner_admin_database_v1.types.Database`
+ A Cloud Spanner database.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.UpdateDatabaseRequest):
+ request = spanner_database_admin.UpdateDatabaseRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if database is not None:
+ request.database = database
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.update_database]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("database.name", request.database.name),)
+ ),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ spanner_database_admin.Database,
+ metadata_type=spanner_database_admin.UpdateDatabaseMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def update_database_ddl(
+ self,
+ request: Optional[
+ Union[spanner_database_admin.UpdateDatabaseDdlRequest, dict]
+ ] = None,
+ *,
+ database: Optional[str] = None,
+ statements: Optional[MutableSequence[str]] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation.Operation:
+ r"""Updates the schema of a Cloud Spanner database by
+ creating/altering/dropping tables, columns, indexes, etc. The
+ returned [long-running operation][google.longrunning.Operation]
+ will have a name of the format
+ ``/operations/`` and can be used to
+ track execution of the schema change(s). The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
+ The operation has no response.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_update_database_ddl():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.UpdateDatabaseDdlRequest(
+ database="database_value",
+ statements=['statements_value1', 'statements_value2'],
+ )
+
+ # Make the request
+ operation = client.update_database_ddl(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.UpdateDatabaseDdlRequest, dict]):
+ The request object. Enqueues the given DDL statements to be applied, in
+ order but not necessarily all at once, to the database
+ schema at some point (or points) in the future. The
+ server checks that the statements are executable
+ (syntactically valid, name tables that exist, etc.)
+ before enqueueing them, but they may still fail upon
+ later execution (e.g., if a statement from another batch
+ of statements is applied first and it conflicts in some
+ way, or if there is some data-related problem like a
+ ``NULL`` value in a column to which ``NOT NULL`` would
+ be added). If a statement fails, all subsequent
+ statements in the batch are automatically cancelled.
+
+ Each batch of statements is assigned a name which can be
+ used with the
+ [Operations][google.longrunning.Operations] API to
+ monitor progress. See the
+ [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id]
+ field for more details.
+ database (str):
+ Required. The database to update.
+ This corresponds to the ``database`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ statements (MutableSequence[str]):
+ Required. DDL statements to be
+ applied to the database.
+
+ This corresponds to the ``statements`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation.Operation:
@@ -773,24 +1565,22 @@ def update_database_ddl(
}
- The JSON representation for Empty is empty JSON
- object {}.
-
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([database, statements])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, statements]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.UpdateDatabaseDdlRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, spanner_database_admin.UpdateDatabaseDdlRequest):
request = spanner_database_admin.UpdateDatabaseDdlRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -810,8 +1600,16 @@ def update_database_ddl(
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Wrap the response in an operation future.
response = operation.from_gapic(
@@ -826,19 +1624,45 @@ def update_database_ddl(
def drop_database(
self,
- request: spanner_database_admin.DropDatabaseRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.DropDatabaseRequest, dict]
+ ] = None,
*,
- database: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ database: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> None:
r"""Drops (aka deletes) a Cloud Spanner database. Completed backups
for the database will be retained according to their
- ``expire_time``.
+ ``expire_time``. Note: Cloud Spanner might continue to accept
+ requests for a few seconds after the database has been deleted.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_drop_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.DropDatabaseRequest(
+ database="database_value",
+ )
+
+ # Make the request
+ client.drop_database(request=request)
Args:
- request (google.cloud.spanner_admin_database_v1.types.DropDatabaseRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.DropDatabaseRequest, dict]):
The request object. The request for
[DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
database (str):
@@ -849,23 +1673,26 @@ def drop_database(
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([database])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.DropDatabaseRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, spanner_database_admin.DropDatabaseRequest):
request = spanner_database_admin.DropDatabaseRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -883,27 +1710,61 @@ def drop_database(
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
rpc(
- request, retry=retry, timeout=timeout, metadata=metadata,
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
def get_database_ddl(
self,
- request: spanner_database_admin.GetDatabaseDdlRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.GetDatabaseDdlRequest, dict]
+ ] = None,
*,
- database: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ database: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> spanner_database_admin.GetDatabaseDdlResponse:
r"""Returns the schema of a Cloud Spanner database as a list of
formatted DDL statements. This method does not show pending
schema updates, those may be queried using the
[Operations][google.longrunning.Operations] API.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_get_database_ddl():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.GetDatabaseDdlRequest(
+ database="database_value",
+ )
+
+ # Make the request
+ response = client.get_database_ddl(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlRequest, dict]):
The request object. The request for
[GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
database (str):
@@ -917,29 +1778,32 @@ def get_database_ddl(
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlResponse:
The response for
- [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
+ [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([database])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.GetDatabaseDdlRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, spanner_database_admin.GetDatabaseDdlRequest):
request = spanner_database_admin.GetDatabaseDdlRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -957,20 +1821,28 @@ def get_database_ddl(
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
def set_iam_policy(
self,
- request: iam_policy_pb2.SetIamPolicyRequest = None,
+ request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None,
*,
- resource: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ resource: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> policy_pb2.Policy:
r"""Sets the access control policy on a database or backup resource.
Replaces any existing policy.
@@ -982,10 +1854,36 @@ def set_iam_policy(
permission on
[resource][google.iam.v1.SetIamPolicyRequest.resource].
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ def sample_set_iam_policy():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.SetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = client.set_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (google.iam.v1.iam_policy_pb2.SetIamPolicyRequest):
- The request object. Request message for `SetIamPolicy`
- method.
+ request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]):
+ The request object. Request message for ``SetIamPolicy`` method.
resource (str):
REQUIRED: The resource for which the
policy is being specified. See the
@@ -998,72 +1896,52 @@ def set_iam_policy(
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.iam.v1.policy_pb2.Policy:
- Defines an Identity and Access Management (IAM) policy. It is used to
- specify access control policies for Cloud Platform
- resources.
+ An Identity and Access Management (IAM) policy, which specifies access
+ controls for Google Cloud resources.
A Policy is a collection of bindings. A binding binds
- one or more members to a single role. Members can be
- user accounts, service accounts, Google groups, and
- domains (such as G Suite). A role is a named list of
- permissions (defined by IAM or configured by users).
- A binding can optionally specify a condition, which
- is a logic expression that further constrains the
- role binding based on attributes about the request
- and/or target resource.
-
- **JSON Example**
-
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": ["user:eve@example.com"],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ]
+ one or more members, or principals, to a single role.
+ Principals can be user accounts, service accounts,
+ Google groups, and domains (such as G Suite). A role
+ is a named list of permissions; each role can be an
+ IAM predefined role or a user-created custom role.
- }
+ For some types of Google Cloud resources, a binding
+ can also specify a condition, which is a logical
+ expression that allows access to a resource only if
+ the expression evaluates to true. A condition can add
+ constraints based on attributes of the request, the
+ resource, or both. To learn which resources support
+ conditions in their IAM policies, see the [IAM
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+
+ **JSON example:**
- **YAML Example**
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z')
+ **YAML example:**
+
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
- [IAM developer's
- guide](\ https://cloud.google.com/iam/docs).
+ [IAM
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([resource])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
@@ -1071,8 +1949,8 @@ def set_iam_policy(
)
if isinstance(request, dict):
- # The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
request = iam_policy_pb2.SetIamPolicyRequest(**request)
elif not request:
# Null request, just make one.
@@ -1090,20 +1968,28 @@ def set_iam_policy(
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
def get_iam_policy(
self,
- request: iam_policy_pb2.GetIamPolicyRequest = None,
+ request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None,
*,
- resource: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ resource: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> policy_pb2.Policy:
r"""Gets the access control policy for a database or backup
resource. Returns an empty policy if a database or backup exists
@@ -1116,10 +2002,36 @@ def get_iam_policy(
permission on
[resource][google.iam.v1.GetIamPolicyRequest.resource].
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ def sample_get_iam_policy():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.GetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = client.get_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (google.iam.v1.iam_policy_pb2.GetIamPolicyRequest):
- The request object. Request message for `GetIamPolicy`
- method.
+ request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]):
+ The request object. Request message for ``GetIamPolicy`` method.
resource (str):
REQUIRED: The resource for which the
policy is being requested. See the
@@ -1132,72 +2044,52 @@ def get_iam_policy(
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.iam.v1.policy_pb2.Policy:
- Defines an Identity and Access Management (IAM) policy. It is used to
- specify access control policies for Cloud Platform
- resources.
+ An Identity and Access Management (IAM) policy, which specifies access
+ controls for Google Cloud resources.
A Policy is a collection of bindings. A binding binds
- one or more members to a single role. Members can be
- user accounts, service accounts, Google groups, and
- domains (such as G Suite). A role is a named list of
- permissions (defined by IAM or configured by users).
- A binding can optionally specify a condition, which
- is a logic expression that further constrains the
- role binding based on attributes about the request
- and/or target resource.
-
- **JSON Example**
-
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": ["user:eve@example.com"],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ]
+ one or more members, or principals, to a single role.
+ Principals can be user accounts, service accounts,
+ Google groups, and domains (such as G Suite). A role
+ is a named list of permissions; each role can be an
+ IAM predefined role or a user-created custom role.
- }
+ For some types of Google Cloud resources, a binding
+ can also specify a condition, which is a logical
+ expression that allows access to a resource only if
+ the expression evaluates to true. A condition can add
+ constraints based on attributes of the request, the
+ resource, or both. To learn which resources support
+ conditions in their IAM policies, see the [IAM
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+
+ **JSON example:**
- **YAML Example**
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z')
+ **YAML example:**
+
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
- [IAM developer's
- guide](\ https://cloud.google.com/iam/docs).
+ [IAM
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([resource])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
@@ -1205,8 +2097,8 @@ def get_iam_policy(
)
if isinstance(request, dict):
- # The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
request = iam_policy_pb2.GetIamPolicyRequest(**request)
elif not request:
# Null request, just make one.
@@ -1224,21 +2116,29 @@ def get_iam_policy(
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
def test_iam_permissions(
self,
- request: iam_policy_pb2.TestIamPermissionsRequest = None,
+ request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None,
*,
- resource: str = None,
- permissions: Sequence[str] = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ resource: Optional[str] = None,
+ permissions: Optional[MutableSequence[str]] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
r"""Returns permissions that the caller has on the specified
database or backup resource.
@@ -1251,10 +2151,37 @@ def test_iam_permissions(
in a NOT_FOUND error if the user has ``spanner.backups.list``
permission on the containing instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ def sample_test_iam_permissions():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.TestIamPermissionsRequest(
+ resource="resource_value",
+ permissions=['permissions_value1', 'permissions_value2'],
+ )
+
+ # Make the request
+ response = client.test_iam_permissions(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest):
- The request object. Request message for
- `TestIamPermissions` method.
+ request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]):
+ The request object. Request message for ``TestIamPermissions`` method.
resource (str):
REQUIRED: The resource for which the
policy detail is being requested. See
@@ -1264,7 +2191,7 @@ def test_iam_permissions(
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- permissions (Sequence[str]):
+ permissions (MutableSequence[str]):
The set of permissions to check for the ``resource``.
Permissions with wildcards (such as '*' or 'storage.*')
are not allowed. For more information see `IAM
@@ -1276,17 +2203,22 @@ def test_iam_permissions(
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
Response message for TestIamPermissions method.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([resource, permissions])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource, permissions]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
@@ -1294,8 +2226,8 @@ def test_iam_permissions(
)
if isinstance(request, dict):
- # The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
request = iam_policy_pb2.TestIamPermissionsRequest(**request)
elif not request:
# Null request, just make one.
@@ -1315,22 +2247,30 @@ def test_iam_permissions(
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
def create_backup(
self,
- request: gsad_backup.CreateBackupRequest = None,
+ request: Optional[Union[gsad_backup.CreateBackupRequest, dict]] = None,
*,
- parent: str = None,
- backup: gsad_backup.Backup = None,
- backup_id: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ backup: Optional[gsad_backup.Backup] = None,
+ backup_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation.Operation:
r"""Starts creating a new Cloud Spanner Backup. The returned backup
[long-running operation][google.longrunning.Operation] will have
@@ -1346,8 +2286,39 @@ def create_backup(
backup creation per database. Backup creation of different
databases can run concurrently.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_create_backup():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.CreateBackupRequest(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ )
+
+ # Make the request
+ operation = client.create_backup(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
- request (google.cloud.spanner_admin_database_v1.types.CreateBackupRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.CreateBackupRequest, dict]):
The request object. The request for
[CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
parent (str):
@@ -1379,8 +2350,10 @@ def create_backup(
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation.Operation:
@@ -1392,19 +2365,20 @@ def create_backup(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, backup, backup_id])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, backup, backup_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a gsad_backup.CreateBackupRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, gsad_backup.CreateBackupRequest):
request = gsad_backup.CreateBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -1426,8 +2400,16 @@ def create_backup(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Wrap the response in an operation future.
response = operation.from_gapic(
@@ -1440,100 +2422,341 @@ def create_backup(
# Done; return the response.
return response
- def get_backup(
+ def copy_backup(
self,
- request: backup.GetBackupRequest = None,
+ request: Optional[Union[backup.CopyBackupRequest, dict]] = None,
*,
- name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> backup.Backup:
- r"""Gets metadata on a pending or completed
- [Backup][google.spanner.admin.database.v1.Backup].
+ parent: Optional[str] = None,
+ backup_id: Optional[str] = None,
+ source_backup: Optional[str] = None,
+ expire_time: Optional[timestamp_pb2.Timestamp] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation.Operation:
+ r"""Starts copying a Cloud Spanner Backup. The returned backup
+ [long-running operation][google.longrunning.Operation] will have
+ a name of the format
+ ``projects//instances//backups//operations/``
+ and can be used to track copying of the backup. The operation is
+ associated with the destination backup. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Backup][google.spanner.admin.database.v1.Backup], if
+ successful. Cancelling the returned operation will stop the
+ copying and delete the destination backup. Concurrent CopyBackup
+ requests can run on the same source backup.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_copy_backup():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.CopyBackupRequest(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ source_backup="source_backup_value",
+ )
+
+ # Make the request
+ operation = client.copy_backup(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
Args:
- request (google.cloud.spanner_admin_database_v1.types.GetBackupRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.CopyBackupRequest, dict]):
The request object. The request for
- [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
- name (str):
- Required. Name of the backup. Values are of the form
+ [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
+ parent (str):
+ Required. The name of the destination instance that will
+ contain the backup copy. Values are of the form:
+ ``projects//instances/``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_id (str):
+ Required. The id of the backup copy. The ``backup_id``
+ appended to ``parent`` forms the full backup_uri of the
+ form
``projects//instances//backups/``.
- This corresponds to the ``name`` field
+ This corresponds to the ``backup_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ source_backup (str):
+ Required. The source backup to be copied. The source
+ backup needs to be in READY state for it to be copied.
+ Once CopyBackup is in progress, the source backup cannot
+ be deleted or cleaned up on expiration until CopyBackup
+ is finished. Values are of the form:
+ ``projects//instances//backups/``.
+
+ This corresponds to the ``source_backup`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ expire_time (google.protobuf.timestamp_pb2.Timestamp):
+ Required. The expiration time of the backup in
+ microsecond granularity. The expiration time must be at
+ least 6 hours and at most 366 days from the
+ ``create_time`` of the source backup. Once the
+ ``expire_time`` has passed, the backup is eligible to be
+ automatically deleted by Cloud Spanner to free the
+ resources used by the backup.
+
+ This corresponds to the ``expire_time`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
- google.cloud.spanner_admin_database_v1.types.Backup:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.spanner_admin_database_v1.types.Backup`
A backup of a Cloud Spanner database.
+
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, backup_id, source_backup, expire_time]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a backup.GetBackupRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
- if not isinstance(request, backup.GetBackupRequest):
- request = backup.GetBackupRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup.CopyBackupRequest):
+ request = backup.CopyBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
- if name is not None:
- request.name = name
+ if parent is not None:
+ request.parent = parent
+ if backup_id is not None:
+ request.backup_id = backup_id
+ if source_backup is not None:
+ request.source_backup = source_backup
+ if expire_time is not None:
+ request.expire_time = expire_time
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = self._transport._wrapped_methods[self._transport.get_backup]
+ rpc = self._transport._wrapped_methods[self._transport.copy_backup]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ backup.Backup,
+ metadata_type=backup.CopyBackupMetadata,
+ )
# Done; return the response.
return response
- def update_backup(
+ def get_backup(
self,
- request: gsad_backup.UpdateBackupRequest = None,
+ request: Optional[Union[backup.GetBackupRequest, dict]] = None,
*,
- backup: gsad_backup.Backup = None,
- update_mask: field_mask_pb2.FieldMask = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> gsad_backup.Backup:
- r"""Updates a pending or completed
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> backup.Backup:
+ r"""Gets metadata on a pending or completed
[Backup][google.spanner.admin.database.v1.Backup].
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_get_backup():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.GetBackupRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_backup(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (google.cloud.spanner_admin_database_v1.types.UpdateBackupRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.GetBackupRequest, dict]):
The request object. The request for
- [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
- backup (google.cloud.spanner_admin_database_v1.types.Backup):
- Required. The backup to update. ``backup.name``, and the
- fields to be updated as specified by ``update_mask`` are
- required. Other fields are ignored. Update is only
- supported for the following fields:
+ [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
+ name (str):
+ Required. Name of the backup. Values are of the form
+ ``projects//instances//backups/``.
- - ``backup.expire_time``.
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.Backup:
+ A backup of a Cloud Spanner database.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup.GetBackupRequest):
+ request = backup.GetBackupRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_backup]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def update_backup(
+ self,
+ request: Optional[Union[gsad_backup.UpdateBackupRequest, dict]] = None,
+ *,
+ backup: Optional[gsad_backup.Backup] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> gsad_backup.Backup:
+ r"""Updates a pending or completed
+ [Backup][google.spanner.admin.database.v1.Backup].
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_update_backup():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.UpdateBackupRequest(
+ )
+
+ # Make the request
+ response = client.update_backup(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.UpdateBackupRequest, dict]):
+ The request object. The request for
+ [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
+ backup (google.cloud.spanner_admin_database_v1.types.Backup):
+ Required. The backup to update. ``backup.name``, and the
+ fields to be updated as specified by ``update_mask`` are
+ required. Other fields are ignored. Update is only
+ supported for the following fields:
+
+ - ``backup.expire_time``.
This corresponds to the ``backup`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -1553,27 +2776,30 @@ def update_backup(
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.Backup:
A backup of a Cloud Spanner database.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([backup, update_mask])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [backup, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a gsad_backup.UpdateBackupRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, gsad_backup.UpdateBackupRequest):
request = gsad_backup.UpdateBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -1595,26 +2821,57 @@ def update_backup(
),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Done; return the response.
return response
def delete_backup(
self,
- request: backup.DeleteBackupRequest = None,
+ request: Optional[Union[backup.DeleteBackupRequest, dict]] = None,
*,
- name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> None:
r"""Deletes a pending or completed
[Backup][google.spanner.admin.database.v1.Backup].
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_delete_backup():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.DeleteBackupRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_backup(request=request)
+
Args:
- request (google.cloud.spanner_admin_database_v1.types.DeleteBackupRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.DeleteBackupRequest, dict]):
The request object. The request for
[DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup].
name (str):
@@ -1628,23 +2885,26 @@ def delete_backup(
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a backup.DeleteBackupRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, backup.DeleteBackupRequest):
request = backup.DeleteBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -1662,26 +2922,59 @@ def delete_backup(
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
rpc(
- request, retry=retry, timeout=timeout, metadata=metadata,
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
def list_backups(
self,
- request: backup.ListBackupsRequest = None,
+ request: Optional[Union[backup.ListBackupsRequest, dict]] = None,
*,
- parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListBackupsPager:
r"""Lists completed and pending backups. Backups returned are
ordered by ``create_time`` in descending order, starting from
the most recent ``create_time``.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_list_backups():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListBackupsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_backups(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
- request (google.cloud.spanner_admin_database_v1.types.ListBackupsRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.ListBackupsRequest, dict]):
The request object. The request for
[ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
parent (str):
@@ -1694,32 +2987,35 @@ def list_backups(
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupsPager:
The response for
- [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
+ [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a backup.ListBackupsRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, backup.ListBackupsRequest):
request = backup.ListBackupsRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -1737,13 +3033,26 @@ def list_backups(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListBackupsPager(
- method=rpc, request=request, response=response, metadata=metadata,
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
# Done; return the response.
@@ -1751,14 +3060,16 @@ def list_backups(
def restore_database(
self,
- request: spanner_database_admin.RestoreDatabaseRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.RestoreDatabaseRequest, dict]
+ ] = None,
*,
- parent: str = None,
- database_id: str = None,
- backup: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ database_id: Optional[str] = None,
+ backup: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation.Operation:
r"""Create a new database by restoring from a completed backup. The
new database must be in the same project and in an instance with
@@ -1780,8 +3091,40 @@ def restore_database(
without waiting for the optimize operation associated with the
first restore to complete.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_restore_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.RestoreDatabaseRequest(
+ backup="backup_value",
+ parent="parent_value",
+ database_id="database_id_value",
+ )
+
+ # Make the request
+ operation = client.restore_database(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
- request (google.cloud.spanner_admin_database_v1.types.RestoreDatabaseRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.RestoreDatabaseRequest, dict]):
The request object. The request for
[RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
parent (str):
@@ -1815,8 +3158,10 @@ def restore_database(
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation.Operation:
@@ -1828,19 +3173,20 @@ def restore_database(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, database_id, backup])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, database_id, backup]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.RestoreDatabaseRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, spanner_database_admin.RestoreDatabaseRequest):
request = spanner_database_admin.RestoreDatabaseRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -1862,8 +3208,16 @@ def restore_database(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# Wrap the response in an operation future.
response = operation.from_gapic(
@@ -1878,12 +3232,14 @@ def restore_database(
def list_database_operations(
self,
- request: spanner_database_admin.ListDatabaseOperationsRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.ListDatabaseOperationsRequest, dict]
+ ] = None,
*,
- parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListDatabaseOperationsPager:
r"""Lists database
[longrunning-operations][google.longrunning.Operation]. A
@@ -1896,8 +3252,35 @@ def list_database_operations(
completed/failed/canceled within the last 7 days, and pending
operations.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_list_database_operations():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListDatabaseOperationsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_database_operations(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
- request (google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsRequest, dict]):
The request object. The request for
[ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
parent (str):
@@ -1911,8 +3294,10 @@ def list_database_operations(
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabaseOperationsPager:
@@ -1924,19 +3309,20 @@ def list_database_operations(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.ListDatabaseOperationsRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(
request, spanner_database_admin.ListDatabaseOperationsRequest
):
@@ -1956,13 +3342,26 @@ def list_database_operations(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListDatabaseOperationsPager(
- method=rpc, request=request, response=response, metadata=metadata,
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
# Done; return the response.
@@ -1970,12 +3369,12 @@ def list_database_operations(
def list_backup_operations(
self,
- request: backup.ListBackupOperationsRequest = None,
+ request: Optional[Union[backup.ListBackupOperationsRequest, dict]] = None,
*,
- parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListBackupOperationsPager:
r"""Lists the backup [long-running
operations][google.longrunning.Operation] in the given instance.
@@ -1990,8 +3389,35 @@ def list_backup_operations(
``operation.metadata.value.progress.start_time`` in descending
order starting from the most recently started operation.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_list_backup_operations():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListBackupOperationsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_backup_operations(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
- request (google.cloud.spanner_admin_database_v1.types.ListBackupOperationsRequest):
+ request (Union[google.cloud.spanner_admin_database_v1.types.ListBackupOperationsRequest, dict]):
The request object. The request for
[ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
parent (str):
@@ -2005,8 +3431,10 @@ def list_backup_operations(
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupOperationsPager:
@@ -2018,19 +3446,20 @@ def list_backup_operations(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a backup.ListBackupOperationsRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, backup.ListBackupOperationsRequest):
request = backup.ListBackupOperationsRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -2048,27 +3477,1246 @@ def list_backup_operations(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListBackupOperationsPager(
- method=rpc, request=request, response=response, metadata=metadata,
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
# Done; return the response.
return response
+ def list_database_roles(
+ self,
+ request: Optional[
+ Union[spanner_database_admin.ListDatabaseRolesRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> pagers.ListDatabaseRolesPager:
+ r"""Lists Cloud Spanner database roles.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_list_database_roles():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListDatabaseRolesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_database_roles(request=request)
-try:
- DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
- gapic_version=pkg_resources.get_distribution(
- "google-cloud-spanner-admin-database",
- ).version,
- )
-except pkg_resources.DistributionNotFound:
- DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+ # Handle the response
+ for response in page_result:
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesRequest, dict]):
+ The request object. The request for
+ [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+ parent (str):
+ Required. The database whose roles should be listed.
+ Values are of the form
+                ``projects/<project>/instances/<instance>/databases/<database>``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabaseRolesPager:
+ The response for
+ [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.ListDatabaseRolesRequest):
+ request = spanner_database_admin.ListDatabaseRolesRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_database_roles]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListDatabaseRolesPager(
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def add_split_points(
+ self,
+ request: Optional[
+ Union[spanner_database_admin.AddSplitPointsRequest, dict]
+ ] = None,
+ *,
+ database: Optional[str] = None,
+ split_points: Optional[
+ MutableSequence[spanner_database_admin.SplitPoints]
+ ] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.AddSplitPointsResponse:
+ r"""Adds split points to specified tables, indexes of a
+ database.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_add_split_points():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.AddSplitPointsRequest(
+ database="database_value",
+ )
+
+ # Make the request
+ response = client.add_split_points(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.AddSplitPointsRequest, dict]):
+ The request object. The request for
+ [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+ database (str):
+ Required. The database on whose tables/indexes split
+ points are to be added. Values are of the form
+                ``projects/<project>/instances/<instance>/databases/<database>``.
+
+ This corresponds to the ``database`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ split_points (MutableSequence[google.cloud.spanner_admin_database_v1.types.SplitPoints]):
+ Required. The split points to add.
+ This corresponds to the ``split_points`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.AddSplitPointsResponse:
+ The response for
+ [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, split_points]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.AddSplitPointsRequest):
+ request = spanner_database_admin.AddSplitPointsRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if database is not None:
+ request.database = database
+ if split_points is not None:
+ request.split_points = split_points
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.add_split_points]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def create_backup_schedule(
+ self,
+ request: Optional[
+ Union[gsad_backup_schedule.CreateBackupScheduleRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ backup_schedule: Optional[gsad_backup_schedule.BackupSchedule] = None,
+ backup_schedule_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> gsad_backup_schedule.BackupSchedule:
+ r"""Creates a new backup schedule.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_create_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.CreateBackupScheduleRequest(
+ parent="parent_value",
+ backup_schedule_id="backup_schedule_id_value",
+ )
+
+ # Make the request
+ response = client.create_backup_schedule(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.CreateBackupScheduleRequest, dict]):
+ The request object. The request for
+ [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule].
+ parent (str):
+ Required. The name of the database
+ that this backup schedule applies to.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_schedule (google.cloud.spanner_admin_database_v1.types.BackupSchedule):
+ Required. The backup schedule to
+ create.
+
+ This corresponds to the ``backup_schedule`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_schedule_id (str):
+ Required. The Id to use for the backup schedule. The
+ ``backup_schedule_id`` appended to ``parent`` forms the
+ full backup schedule name of the form
+                ``projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>``.
+
+ This corresponds to the ``backup_schedule_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, backup_schedule, backup_schedule_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, gsad_backup_schedule.CreateBackupScheduleRequest):
+ request = gsad_backup_schedule.CreateBackupScheduleRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if backup_schedule is not None:
+ request.backup_schedule = backup_schedule
+ if backup_schedule_id is not None:
+ request.backup_schedule_id = backup_schedule_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.create_backup_schedule]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def get_backup_schedule(
+ self,
+ request: Optional[Union[backup_schedule.GetBackupScheduleRequest, dict]] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> backup_schedule.BackupSchedule:
+ r"""Gets backup schedule for the input schedule name.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_get_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.GetBackupScheduleRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_backup_schedule(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.GetBackupScheduleRequest, dict]):
+ The request object. The request for
+ [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule].
+ name (str):
+ Required. The name of the schedule to retrieve. Values
+ are of the form
+                ``projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup_schedule.GetBackupScheduleRequest):
+ request = backup_schedule.GetBackupScheduleRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_backup_schedule]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def update_backup_schedule(
+ self,
+ request: Optional[
+ Union[gsad_backup_schedule.UpdateBackupScheduleRequest, dict]
+ ] = None,
+ *,
+ backup_schedule: Optional[gsad_backup_schedule.BackupSchedule] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> gsad_backup_schedule.BackupSchedule:
+ r"""Updates a backup schedule.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_update_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.UpdateBackupScheduleRequest(
+ )
+
+ # Make the request
+ response = client.update_backup_schedule(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.UpdateBackupScheduleRequest, dict]):
+ The request object. The request for
+ [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule].
+ backup_schedule (google.cloud.spanner_admin_database_v1.types.BackupSchedule):
+ Required. The backup schedule to update.
+ ``backup_schedule.name``, and the fields to be updated
+ as specified by ``update_mask`` are required. Other
+ fields are ignored.
+
+ This corresponds to the ``backup_schedule`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. A mask specifying which
+ fields in the BackupSchedule resource
+ should be updated. This mask is relative
+ to the BackupSchedule resource, not to
+ the request message. The field mask must
+ always be specified; this prevents any
+ future fields from being erased
+ accidentally.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [backup_schedule, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, gsad_backup_schedule.UpdateBackupScheduleRequest):
+ request = gsad_backup_schedule.UpdateBackupScheduleRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if backup_schedule is not None:
+ request.backup_schedule = backup_schedule
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.update_backup_schedule]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("backup_schedule.name", request.backup_schedule.name),)
+ ),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def delete_backup_schedule(
+ self,
+ request: Optional[
+ Union[backup_schedule.DeleteBackupScheduleRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Deletes a backup schedule.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_delete_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.DeleteBackupScheduleRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_backup_schedule(request=request)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.DeleteBackupScheduleRequest, dict]):
+ The request object. The request for
+ [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule].
+ name (str):
+ Required. The name of the schedule to delete. Values are
+ of the form
+                ``projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup_schedule.DeleteBackupScheduleRequest):
+ request = backup_schedule.DeleteBackupScheduleRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.delete_backup_schedule]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ def list_backup_schedules(
+ self,
+ request: Optional[
+ Union[backup_schedule.ListBackupSchedulesRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> pagers.ListBackupSchedulesPager:
+ r"""Lists all the backup schedules for the database.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_list_backup_schedules():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListBackupSchedulesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_backup_schedules(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesRequest, dict]):
+ The request object. The request for
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+ parent (str):
+ Required. Database is the parent
+ resource whose backup schedules should
+ be listed. Values are of the form
+                ``projects/<project>/instances/<instance>/databases/<database>``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupSchedulesPager:
+ The response for
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup_schedule.ListBackupSchedulesRequest):
+ request = backup_schedule.ListBackupSchedulesRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_backup_schedules]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListBackupSchedulesPager(
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def internal_update_graph_operation(
+ self,
+ request: Optional[
+ Union[spanner_database_admin.InternalUpdateGraphOperationRequest, dict]
+ ] = None,
+ *,
+ database: Optional[str] = None,
+ operation_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.InternalUpdateGraphOperationResponse:
+ r"""This is an internal API called by Spanner Graph jobs.
+ You should never need to call this API directly.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_internal_update_graph_operation():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.InternalUpdateGraphOperationRequest(
+ database="database_value",
+ operation_id="operation_id_value",
+ vm_identity_token="vm_identity_token_value",
+ )
+
+ # Make the request
+ response = client.internal_update_graph_operation(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.InternalUpdateGraphOperationRequest, dict]):
+ The request object. Internal request proto, do not use
+ directly.
+ database (str):
+ Internal field, do not use directly.
+ This corresponds to the ``database`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ operation_id (str):
+ Internal field, do not use directly.
+ This corresponds to the ``operation_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.InternalUpdateGraphOperationResponse:
+ Internal response proto, do not use
+ directly.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, operation_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, spanner_database_admin.InternalUpdateGraphOperationRequest
+ ):
+ request = spanner_database_admin.InternalUpdateGraphOperationRequest(
+ request
+ )
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if database is not None:
+ request.database = database
+ if operation_id is not None:
+ request.operation_id = operation_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[
+ self._transport.internal_update_graph_operation
+ ]
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def __enter__(self) -> "DatabaseAdminClient":
+ return self
+
+ def __exit__(self, type, value, traceback):
+ """Releases underlying transport's resources.
+
+ .. warning::
+ ONLY use as a context manager if the transport is NOT shared
+ with other clients! Exiting the with block will CLOSE the transport
+ and may cause errors in other clients!
+ """
+ self.transport.close()
+
+ def list_operations(
+ self,
+ request: Optional[operations_pb2.ListOperationsRequest] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.ListOperationsResponse:
+ r"""Lists operations that match the specified filter in the request.
+
+ Args:
+ request (:class:`~.operations_pb2.ListOperationsRequest`):
+ The request object. Request message for
+ `ListOperations` method.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ Returns:
+ ~.operations_pb2.ListOperationsResponse:
+ Response message for ``ListOperations`` method.
+ """
+ # Create or coerce a protobuf request object.
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = operations_pb2.ListOperationsRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_operations]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ try:
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+ except core_exceptions.GoogleAPICallError as e:
+ self._add_cred_info_for_auth_errors(e)
+ raise e
+
+ def get_operation(
+ self,
+ request: Optional[operations_pb2.GetOperationRequest] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.Operation:
+ r"""Gets the latest state of a long-running operation.
+
+ Args:
+ request (:class:`~.operations_pb2.GetOperationRequest`):
+ The request object. Request message for
+ `GetOperation` method.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ Returns:
+ ~.operations_pb2.Operation:
+ An ``Operation`` object.
+ """
+ # Create or coerce a protobuf request object.
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = operations_pb2.GetOperationRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_operation]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ try:
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+ except core_exceptions.GoogleAPICallError as e:
+ self._add_cred_info_for_auth_errors(e)
+ raise e
+
+ def delete_operation(
+ self,
+ request: Optional[operations_pb2.DeleteOperationRequest] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Deletes a long-running operation.
+
+ This method indicates that the client is no longer interested
+ in the operation result. It does not cancel the operation.
+ If the server doesn't support this method, it returns
+ `google.rpc.Code.UNIMPLEMENTED`.
+
+ Args:
+ request (:class:`~.operations_pb2.DeleteOperationRequest`):
+ The request object. Request message for
+ `DeleteOperation` method.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ Returns:
+ None
+ """
+ # Create or coerce a protobuf request object.
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = operations_pb2.DeleteOperationRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.delete_operation]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ def cancel_operation(
+ self,
+ request: Optional[operations_pb2.CancelOperationRequest] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Starts asynchronous cancellation on a long-running operation.
+
+ The server makes a best effort to cancel the operation, but success
+ is not guaranteed. If the server doesn't support this method, it returns
+ `google.rpc.Code.UNIMPLEMENTED`.
+
+ Args:
+ request (:class:`~.operations_pb2.CancelOperationRequest`):
+ The request object. Request message for
+ `CancelOperation` method.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ Returns:
+ None
+ """
+ # Create or coerce a protobuf request object.
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = operations_pb2.CancelOperationRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.cancel_operation]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+
+DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=package_version.__version__
+)
+if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER
+ DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__
__all__ = ("DatabaseAdminClient",)
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py b/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py
index 552f761751..c9e2e14d52 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,18 +13,32 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.api_core import retry_async as retries_async
from typing import (
Any,
- AsyncIterable,
+ AsyncIterator,
Awaitable,
Callable,
- Iterable,
Sequence,
Tuple,
Optional,
+ Iterator,
+ Union,
)
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
+ OptionalAsyncRetry = Union[
+ retries_async.AsyncRetry, gapic_v1.method._MethodDefault, None
+ ]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
+ OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore
+
from google.cloud.spanner_admin_database_v1.types import backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
from google.longrunning import operations_pb2 # type: ignore
@@ -53,7 +67,9 @@ def __init__(
request: spanner_database_admin.ListDatabasesRequest,
response: spanner_database_admin.ListDatabasesResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiate the pager.
@@ -64,26 +80,38 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListDatabasesResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = spanner_database_admin.ListDatabasesRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
- def pages(self) -> Iterable[spanner_database_admin.ListDatabasesResponse]:
+ def pages(self) -> Iterator[spanner_database_admin.ListDatabasesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = self._method(self._request, metadata=self._metadata)
+ self._response = self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
- def __iter__(self) -> Iterable[spanner_database_admin.Database]:
+ def __iter__(self) -> Iterator[spanner_database_admin.Database]:
for page in self.pages:
yield from page.databases
@@ -115,7 +143,9 @@ def __init__(
request: spanner_database_admin.ListDatabasesRequest,
response: spanner_database_admin.ListDatabasesResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiates the pager.
@@ -126,12 +156,19 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListDatabasesResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = spanner_database_admin.ListDatabasesRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
@@ -140,14 +177,19 @@ def __getattr__(self, name: str) -> Any:
@property
async def pages(
self,
- ) -> AsyncIterable[spanner_database_admin.ListDatabasesResponse]:
+ ) -> AsyncIterator[spanner_database_admin.ListDatabasesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = await self._method(self._request, metadata=self._metadata)
+ self._response = await self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
- def __aiter__(self) -> AsyncIterable[spanner_database_admin.Database]:
+ def __aiter__(self) -> AsyncIterator[spanner_database_admin.Database]:
async def async_generator():
async for page in self.pages:
for response in page.databases:
@@ -183,7 +225,9 @@ def __init__(
request: backup.ListBackupsRequest,
response: backup.ListBackupsResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiate the pager.
@@ -194,26 +238,38 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListBackupsResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = backup.ListBackupsRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
- def pages(self) -> Iterable[backup.ListBackupsResponse]:
+ def pages(self) -> Iterator[backup.ListBackupsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = self._method(self._request, metadata=self._metadata)
+ self._response = self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
- def __iter__(self) -> Iterable[backup.Backup]:
+ def __iter__(self) -> Iterator[backup.Backup]:
for page in self.pages:
yield from page.backups
@@ -245,7 +301,9 @@ def __init__(
request: backup.ListBackupsRequest,
response: backup.ListBackupsResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiates the pager.
@@ -256,26 +314,38 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListBackupsResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = backup.ListBackupsRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
- async def pages(self) -> AsyncIterable[backup.ListBackupsResponse]:
+ async def pages(self) -> AsyncIterator[backup.ListBackupsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = await self._method(self._request, metadata=self._metadata)
+ self._response = await self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
- def __aiter__(self) -> AsyncIterable[backup.Backup]:
+ def __aiter__(self) -> AsyncIterator[backup.Backup]:
async def async_generator():
async for page in self.pages:
for response in page.backups:
@@ -311,7 +381,9 @@ def __init__(
request: spanner_database_admin.ListDatabaseOperationsRequest,
response: spanner_database_admin.ListDatabaseOperationsResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiate the pager.
@@ -322,26 +394,38 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = spanner_database_admin.ListDatabaseOperationsRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
- def pages(self) -> Iterable[spanner_database_admin.ListDatabaseOperationsResponse]:
+ def pages(self) -> Iterator[spanner_database_admin.ListDatabaseOperationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = self._method(self._request, metadata=self._metadata)
+ self._response = self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
- def __iter__(self) -> Iterable[operations_pb2.Operation]:
+ def __iter__(self) -> Iterator[operations_pb2.Operation]:
for page in self.pages:
yield from page.operations
@@ -375,7 +459,9 @@ def __init__(
request: spanner_database_admin.ListDatabaseOperationsRequest,
response: spanner_database_admin.ListDatabaseOperationsResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiates the pager.
@@ -386,12 +472,19 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = spanner_database_admin.ListDatabaseOperationsRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
@@ -400,14 +493,19 @@ def __getattr__(self, name: str) -> Any:
@property
async def pages(
self,
- ) -> AsyncIterable[spanner_database_admin.ListDatabaseOperationsResponse]:
+ ) -> AsyncIterator[spanner_database_admin.ListDatabaseOperationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = await self._method(self._request, metadata=self._metadata)
+ self._response = await self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
- def __aiter__(self) -> AsyncIterable[operations_pb2.Operation]:
+ def __aiter__(self) -> AsyncIterator[operations_pb2.Operation]:
async def async_generator():
async for page in self.pages:
for response in page.operations:
@@ -443,7 +541,9 @@ def __init__(
request: backup.ListBackupOperationsRequest,
response: backup.ListBackupOperationsResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiate the pager.
@@ -454,26 +554,38 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListBackupOperationsResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = backup.ListBackupOperationsRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
- def pages(self) -> Iterable[backup.ListBackupOperationsResponse]:
+ def pages(self) -> Iterator[backup.ListBackupOperationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = self._method(self._request, metadata=self._metadata)
+ self._response = self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
- def __iter__(self) -> Iterable[operations_pb2.Operation]:
+ def __iter__(self) -> Iterator[operations_pb2.Operation]:
for page in self.pages:
yield from page.operations
@@ -505,7 +617,9 @@ def __init__(
request: backup.ListBackupOperationsRequest,
response: backup.ListBackupOperationsResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiates the pager.
@@ -516,26 +630,38 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListBackupOperationsResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = backup.ListBackupOperationsRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
- async def pages(self) -> AsyncIterable[backup.ListBackupOperationsResponse]:
+ async def pages(self) -> AsyncIterator[backup.ListBackupOperationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = await self._method(self._request, metadata=self._metadata)
+ self._response = await self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
- def __aiter__(self) -> AsyncIterable[operations_pb2.Operation]:
+ def __aiter__(self) -> AsyncIterator[operations_pb2.Operation]:
async def async_generator():
async for page in self.pages:
for response in page.operations:
@@ -545,3 +671,319 @@ async def async_generator():
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListDatabaseRolesPager:
+ """A pager for iterating through ``list_database_roles`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``database_roles`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``ListDatabaseRoles`` requests and continue to iterate
+ through the ``database_roles`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., spanner_database_admin.ListDatabaseRolesResponse],
+ request: spanner_database_admin.ListDatabaseRolesRequest,
+ response: spanner_database_admin.ListDatabaseRolesResponse,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesRequest):
+ The initial request object.
+ response (google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesResponse):
+ The initial response object.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ self._method = method
+ self._request = spanner_database_admin.ListDatabaseRolesRequest(request)
+ self._response = response
+ self._retry = retry
+ self._timeout = timeout
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterator[spanner_database_admin.ListDatabaseRolesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
+ yield self._response
+
+ def __iter__(self) -> Iterator[spanner_database_admin.DatabaseRole]:
+ for page in self.pages:
+ yield from page.database_roles
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListDatabaseRolesAsyncPager:
+ """A pager for iterating through ``list_database_roles`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``database_roles`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListDatabaseRoles`` requests and continue to iterate
+ through the ``database_roles`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[
+ ..., Awaitable[spanner_database_admin.ListDatabaseRolesResponse]
+ ],
+ request: spanner_database_admin.ListDatabaseRolesRequest,
+ response: spanner_database_admin.ListDatabaseRolesResponse,
+ *,
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
+ ):
+ """Instantiates the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesRequest):
+ The initial request object.
+ response (google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesResponse):
+ The initial response object.
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ self._method = method
+ self._request = spanner_database_admin.ListDatabaseRolesRequest(request)
+ self._response = response
+ self._retry = retry
+ self._timeout = timeout
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(
+ self,
+ ) -> AsyncIterator[spanner_database_admin.ListDatabaseRolesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterator[spanner_database_admin.DatabaseRole]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.database_roles:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListBackupSchedulesPager:
+ """A pager for iterating through ``list_backup_schedules`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``backup_schedules`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``ListBackupSchedules`` requests and continue to iterate
+ through the ``backup_schedules`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., backup_schedule.ListBackupSchedulesResponse],
+ request: backup_schedule.ListBackupSchedulesRequest,
+ response: backup_schedule.ListBackupSchedulesResponse,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesRequest):
+ The initial request object.
+ response (google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesResponse):
+ The initial response object.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ self._method = method
+ self._request = backup_schedule.ListBackupSchedulesRequest(request)
+ self._response = response
+ self._retry = retry
+ self._timeout = timeout
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterator[backup_schedule.ListBackupSchedulesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
+ yield self._response
+
+ def __iter__(self) -> Iterator[backup_schedule.BackupSchedule]:
+ for page in self.pages:
+ yield from page.backup_schedules
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListBackupSchedulesAsyncPager:
+ """A pager for iterating through ``list_backup_schedules`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``backup_schedules`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListBackupSchedules`` requests and continue to iterate
+ through the ``backup_schedules`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., Awaitable[backup_schedule.ListBackupSchedulesResponse]],
+ request: backup_schedule.ListBackupSchedulesRequest,
+ response: backup_schedule.ListBackupSchedulesResponse,
+ *,
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
+ ):
+ """Instantiates the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesRequest):
+ The initial request object.
+ response (google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesResponse):
+ The initial response object.
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ self._method = method
+ self._request = backup_schedule.ListBackupSchedulesRequest(request)
+ self._response = response
+ self._retry = retry
+ self._timeout = timeout
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(self) -> AsyncIterator[backup_schedule.ListBackupSchedulesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterator[backup_schedule.BackupSchedule]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.backup_schedules:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/README.rst b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/README.rst
new file mode 100644
index 0000000000..f70c023a98
--- /dev/null
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/README.rst
@@ -0,0 +1,9 @@
+
+transport inheritance structure
+_______________________________
+
+`DatabaseAdminTransport` is the ABC for all transports.
+- public child `DatabaseAdminGrpcTransport` for sync gRPC transport (defined in `grpc.py`).
+- public child `DatabaseAdminGrpcAsyncIOTransport` for async gRPC transport (defined in `grpc_asyncio.py`).
+- private child `_BaseDatabaseAdminRestTransport` for base REST transport with inner classes `_BaseMETHOD` (defined in `rest_base.py`).
+- public child `DatabaseAdminRestTransport` for sync REST transport with inner classes `METHOD` derived from the parent's corresponding `_BaseMETHOD` classes (defined in `rest.py`).
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/__init__.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/__init__.py
index 743a749bfa..23ba04ea21 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/__init__.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,15 +19,20 @@
from .base import DatabaseAdminTransport
from .grpc import DatabaseAdminGrpcTransport
from .grpc_asyncio import DatabaseAdminGrpcAsyncIOTransport
+from .rest import DatabaseAdminRestTransport
+from .rest import DatabaseAdminRestInterceptor
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[DatabaseAdminTransport]]
_transport_registry["grpc"] = DatabaseAdminGrpcTransport
_transport_registry["grpc_asyncio"] = DatabaseAdminGrpcAsyncIOTransport
+_transport_registry["rest"] = DatabaseAdminRestTransport
__all__ = (
"DatabaseAdminTransport",
"DatabaseAdminGrpcTransport",
"DatabaseAdminGrpcAsyncIOTransport",
+ "DatabaseAdminRestTransport",
+ "DatabaseAdminRestInterceptor",
)
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py
index ec8cafa77f..689f6afe96 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,43 +15,37 @@
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
-import packaging.version
-import pkg_resources
+
+from google.cloud.spanner_admin_database_v1 import gapic_version as package_version
import google.auth # type: ignore
-import google.api_core # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
-from google.api_core import operations_v1 # type: ignore
+import google.api_core
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
+import google.protobuf
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as gsad_backup_schedule,
+)
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
-try:
- DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
- gapic_version=pkg_resources.get_distribution(
- "google-cloud-spanner-admin-database",
- ).version,
- )
-except pkg_resources.DistributionNotFound:
- DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=package_version.__version__
+)
-try:
- # google.auth.__version__ was added in 1.26.0
- _GOOGLE_AUTH_VERSION = google.auth.__version__
-except AttributeError:
- try: # try pkg_resources if it is available
- _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
- except pkg_resources.DistributionNotFound: # pragma: NO COVER
- _GOOGLE_AUTH_VERSION = None
+if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER
+ DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__
class DatabaseAdminTransport(abc.ABC):
@@ -68,19 +62,20 @@ def __init__(
self,
*,
host: str = DEFAULT_HOST,
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
+ api_audience: Optional[str] = None,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
- The hostname to connect to.
+ The hostname to connect to (default: 'spanner.googleapis.com').
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
@@ -100,15 +95,13 @@ def __init__(
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
- # Save the hostname. Default to port 443 (HTTPS) if none is specified.
- if ":" not in host:
- host += ":443"
- self._host = host
- scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
+ scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
+ if not hasattr(self, "_ignore_credentials"):
+ self._ignore_credentials: bool = False
# If no credentials are provided, then determine the appropriate
# defaults.
@@ -121,13 +114,17 @@ def __init__(
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
-
- elif credentials is None:
+ elif credentials is None and not self._ignore_credentials:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
+ # Don't apply the audience if a credentials file was passed by the user.
+ if hasattr(credentials, "with_gdch_audience"):
+ credentials = credentials.with_gdch_audience(
+ api_audience if api_audience else host
+ )
- # If the credentials is service account credentials, then always try to use self signed JWT.
+ # If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
@@ -138,28 +135,14 @@ def __init__(
# Save the credentials.
self._credentials = credentials
- # TODO(busunkim): This method is in the base transport
- # to avoid duplicating code across the transport classes. These functions
- # should be deleted once the minimum required versions of google-auth is increased.
-
- # TODO: Remove this function once google-auth >= 1.25.0 is required
- @classmethod
- def _get_scopes_kwargs(
- cls, host: str, scopes: Optional[Sequence[str]]
- ) -> Dict[str, Optional[Sequence[str]]]:
- """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
-
- scopes_kwargs = {}
-
- if _GOOGLE_AUTH_VERSION and (
- packaging.version.parse(_GOOGLE_AUTH_VERSION)
- >= packaging.version.parse("1.25.0")
- ):
- scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
- else:
- scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+ # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+ if ":" not in host:
+ host += ":443"
+ self._host = host
- return scopes_kwargs
+ @property
+ def host(self):
+ return self._host
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
@@ -180,7 +163,9 @@ def _prep_wrapped_messages(self, client_info):
client_info=client_info,
),
self.create_database: gapic_v1.method.wrap_method(
- self.create_database, default_timeout=3600.0, client_info=client_info,
+ self.create_database,
+ default_timeout=3600.0,
+ client_info=client_info,
),
self.get_database: gapic_v1.method.wrap_method(
self.get_database,
@@ -197,6 +182,21 @@ def _prep_wrapped_messages(self, client_info):
default_timeout=3600.0,
client_info=client_info,
),
+ self.update_database: gapic_v1.method.wrap_method(
+ self.update_database,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
self.update_database_ddl: gapic_v1.method.wrap_method(
self.update_database_ddl,
default_retry=retries.Retry(
@@ -243,7 +243,9 @@ def _prep_wrapped_messages(self, client_info):
client_info=client_info,
),
self.set_iam_policy: gapic_v1.method.wrap_method(
- self.set_iam_policy, default_timeout=30.0, client_info=client_info,
+ self.set_iam_policy,
+ default_timeout=30.0,
+ client_info=client_info,
),
self.get_iam_policy: gapic_v1.method.wrap_method(
self.get_iam_policy,
@@ -266,7 +268,14 @@ def _prep_wrapped_messages(self, client_info):
client_info=client_info,
),
self.create_backup: gapic_v1.method.wrap_method(
- self.create_backup, default_timeout=3600.0, client_info=client_info,
+ self.create_backup,
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.copy_backup: gapic_v1.method.wrap_method(
+ self.copy_backup,
+ default_timeout=3600.0,
+ client_info=client_info,
),
self.get_backup: gapic_v1.method.wrap_method(
self.get_backup,
@@ -329,7 +338,9 @@ def _prep_wrapped_messages(self, client_info):
client_info=client_info,
),
self.restore_database: gapic_v1.method.wrap_method(
- self.restore_database, default_timeout=3600.0, client_info=client_info,
+ self.restore_database,
+ default_timeout=3600.0,
+ client_info=client_info,
),
self.list_database_operations: gapic_v1.method.wrap_method(
self.list_database_operations,
@@ -361,10 +372,149 @@ def _prep_wrapped_messages(self, client_info):
default_timeout=3600.0,
client_info=client_info,
),
+ self.list_database_roles: gapic_v1.method.wrap_method(
+ self.list_database_roles,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.add_split_points: gapic_v1.method.wrap_method(
+ self.add_split_points,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.create_backup_schedule: gapic_v1.method.wrap_method(
+ self.create_backup_schedule,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.get_backup_schedule: gapic_v1.method.wrap_method(
+ self.get_backup_schedule,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.update_backup_schedule: gapic_v1.method.wrap_method(
+ self.update_backup_schedule,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.delete_backup_schedule: gapic_v1.method.wrap_method(
+ self.delete_backup_schedule,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.list_backup_schedules: gapic_v1.method.wrap_method(
+ self.list_backup_schedules,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.internal_update_graph_operation: gapic_v1.method.wrap_method(
+ self.internal_update_graph_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.cancel_operation: gapic_v1.method.wrap_method(
+ self.cancel_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.delete_operation: gapic_v1.method.wrap_method(
+ self.delete_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.get_operation: gapic_v1.method.wrap_method(
+ self.get_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.list_operations: gapic_v1.method.wrap_method(
+ self.list_operations,
+ default_timeout=None,
+ client_info=client_info,
+ ),
}
+ def close(self):
+ """Closes resources associated with the transport.
+
+ .. warning::
+ Only call this method if the transport is NOT shared
+ with other clients - this may cause errors in other clients!
+ """
+ raise NotImplementedError()
+
@property
- def operations_client(self) -> operations_v1.OperationsClient:
+ def operations_client(self):
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@@ -400,6 +550,15 @@ def get_database(
]:
raise NotImplementedError()
+ @property
+ def update_database(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.UpdateDatabaseRequest],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
+ ]:
+ raise NotImplementedError()
+
@property
def update_database_ddl(
self,
@@ -469,6 +628,15 @@ def create_backup(
]:
raise NotImplementedError()
+ @property
+ def copy_backup(
+ self,
+ ) -> Callable[
+ [backup.CopyBackupRequest],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
+ ]:
+ raise NotImplementedError()
+
@property
def get_backup(
self,
@@ -536,5 +704,134 @@ def list_backup_operations(
]:
raise NotImplementedError()
+ @property
+ def list_database_roles(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.ListDatabaseRolesRequest],
+ Union[
+ spanner_database_admin.ListDatabaseRolesResponse,
+ Awaitable[spanner_database_admin.ListDatabaseRolesResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def add_split_points(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.AddSplitPointsRequest],
+ Union[
+ spanner_database_admin.AddSplitPointsResponse,
+ Awaitable[spanner_database_admin.AddSplitPointsResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def create_backup_schedule(
+ self,
+ ) -> Callable[
+ [gsad_backup_schedule.CreateBackupScheduleRequest],
+ Union[
+ gsad_backup_schedule.BackupSchedule,
+ Awaitable[gsad_backup_schedule.BackupSchedule],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def get_backup_schedule(
+ self,
+ ) -> Callable[
+ [backup_schedule.GetBackupScheduleRequest],
+ Union[
+ backup_schedule.BackupSchedule, Awaitable[backup_schedule.BackupSchedule]
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def update_backup_schedule(
+ self,
+ ) -> Callable[
+ [gsad_backup_schedule.UpdateBackupScheduleRequest],
+ Union[
+ gsad_backup_schedule.BackupSchedule,
+ Awaitable[gsad_backup_schedule.BackupSchedule],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def delete_backup_schedule(
+ self,
+ ) -> Callable[
+ [backup_schedule.DeleteBackupScheduleRequest],
+ Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def list_backup_schedules(
+ self,
+ ) -> Callable[
+ [backup_schedule.ListBackupSchedulesRequest],
+ Union[
+ backup_schedule.ListBackupSchedulesResponse,
+ Awaitable[backup_schedule.ListBackupSchedulesResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def internal_update_graph_operation(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.InternalUpdateGraphOperationRequest],
+ Union[
+ spanner_database_admin.InternalUpdateGraphOperationResponse,
+ Awaitable[spanner_database_admin.InternalUpdateGraphOperationResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def list_operations(
+ self,
+ ) -> Callable[
+ [operations_pb2.ListOperationsRequest],
+ Union[
+ operations_pb2.ListOperationsResponse,
+ Awaitable[operations_pb2.ListOperationsResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def get_operation(
+ self,
+ ) -> Callable[
+ [operations_pb2.GetOperationRequest],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def cancel_operation(
+ self,
+ ) -> Callable[[operations_pb2.CancelOperationRequest], None,]:
+ raise NotImplementedError()
+
+ @property
+ def delete_operation(
+ self,
+ ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]:
+ raise NotImplementedError()
+
+ @property
+ def kind(self) -> str:
+ raise NotImplementedError()
+
__all__ = ("DatabaseAdminTransport",)
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py
index 00c46cf906..8f31a1fb98 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,20 +13,30 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+import json
+import logging as std_logging
+import pickle
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
-from google.api_core import grpc_helpers # type: ignore
-from google.api_core import operations_v1 # type: ignore
-from google.api_core import gapic_v1 # type: ignore
+from google.api_core import grpc_helpers
+from google.api_core import operations_v1
+from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.protobuf.json_format import MessageToJson
+import google.protobuf.message
import grpc # type: ignore
+import proto # type: ignore
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as gsad_backup_schedule,
+)
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
@@ -34,16 +44,92 @@
from google.protobuf import empty_pb2 # type: ignore
from .base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO
+try:
+ from google.api_core import client_logging # type: ignore
+
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = std_logging.getLogger(__name__)
+
+
+class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER
+ def intercept_unary_unary(self, continuation, client_call_details, request):
+ logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ std_logging.DEBUG
+ )
+ if logging_enabled: # pragma: NO COVER
+ request_metadata = client_call_details.metadata
+ if isinstance(request, proto.Message):
+ request_payload = type(request).to_json(request)
+ elif isinstance(request, google.protobuf.message.Message):
+ request_payload = MessageToJson(request)
+ else:
+ request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"
+
+ request_metadata = {
+ key: value.decode("utf-8") if isinstance(value, bytes) else value
+ for key, value in request_metadata
+ }
+ grpc_request = {
+ "payload": request_payload,
+ "requestMethod": "grpc",
+ "metadata": dict(request_metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for {client_call_details.method}",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": str(client_call_details.method),
+ "request": grpc_request,
+ "metadata": grpc_request["metadata"],
+ },
+ )
+ response = continuation(client_call_details, request)
+ if logging_enabled: # pragma: NO COVER
+ response_metadata = response.trailing_metadata()
+ # Convert gRPC metadata to a list of (key, value) tuples
+ metadata = (
+ dict([(k, str(v)) for k, v in response_metadata])
+ if response_metadata
+ else None
+ )
+ result = response.result()
+ if isinstance(result, proto.Message):
+ response_payload = type(result).to_json(result)
+ elif isinstance(result, google.protobuf.message.Message):
+ response_payload = MessageToJson(result)
+ else:
+ response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
+ grpc_response = {
+ "payload": response_payload,
+ "metadata": metadata,
+ "status": "OK",
+ }
+ _LOGGER.debug(
+ f"Received response for {client_call_details.method}.",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": client_call_details.method,
+ "response": grpc_response,
+ "metadata": grpc_response["metadata"],
+ },
+ )
+ return response
+
class DatabaseAdminGrpcTransport(DatabaseAdminTransport):
"""gRPC backend transport for DatabaseAdmin.
Cloud Spanner Database Admin API
- The Cloud Spanner Database Admin API can be used to create,
- drop, and list databases. It also enables updating the schema of
- pre-existing databases. It can be also used to create, delete
- and list backups for a database and to restore from an existing
- backup.
+
+ The Cloud Spanner Database Admin API can be used to:
+
+ - create, drop, and list databases
+ - update the schema of pre-existing databases
+ - create, delete, copy and list backups for a database
+ - restore a database from an existing backup
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
@@ -59,50 +145,54 @@ def __init__(
self,
*,
host: str = "spanner.googleapis.com",
- credentials: ga_credentials.Credentials = None,
- credentials_file: str = None,
- scopes: Sequence[str] = None,
- channel: grpc.Channel = None,
- api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
- ssl_channel_credentials: grpc.ChannelCredentials = None,
- client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None,
+ api_mtls_endpoint: Optional[str] = None,
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
+ api_audience: Optional[str] = None,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
- The hostname to connect to.
+ The hostname to connect to (default: 'spanner.googleapis.com').
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- This argument is ignored if ``channel`` is provided.
+ This argument is ignored if a ``channel`` instance is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is ignored if ``channel`` is provided.
+ This argument is ignored if a ``channel`` instance is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
- ignored if ``channel`` is provided.
- channel (Optional[grpc.Channel]): A ``Channel`` instance through
- which to make calls.
+ ignored if a ``channel`` instance is provided.
+ channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]):
+ A ``Channel`` instance through which to make calls, or a Callable
+ that constructs and returns one. If set to None, ``self.create_channel``
+ is used to create the channel. If a Callable is given, it will be called
+ with the same arguments as used in ``self.create_channel``.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
- ``client_cert_source`` or applicatin default SSL credentials.
+ ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
- for grpc channel. It is ignored if ``channel`` is provided.
+ for the grpc channel. It is ignored if a ``channel`` instance is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
- both in PEM format. It is used to configure mutual TLS channel. It is
- ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ both in PEM format. It is used to configure a mutual TLS channel. It is
+ ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
@@ -122,16 +212,17 @@ def __init__(
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
- self._operations_client = None
+ self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
- if channel:
+ if isinstance(channel, grpc.Channel):
# Ignore credentials if a channel was passed.
- credentials = False
+ credentials = None
+ self._ignore_credentials = True
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
@@ -166,13 +257,19 @@ def __init__(
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
+ api_audience=api_audience,
)
if not self._grpc_channel:
- self._grpc_channel = type(self).create_channel(
+ # initialize with the provided callable or the default channel
+ channel_init = channel or type(self).create_channel
+ self._grpc_channel = channel_init(
self._host,
+ # use the credentials which are saved
credentials=self._credentials,
- credentials_file=credentials_file,
+ # Set ``credentials_file`` to ``None`` here as
+ # the credentials that we saved earlier should be used.
+ credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
@@ -182,15 +279,20 @@ def __init__(
],
)
- # Wrap messages. This must be done after self._grpc_channel exists
+ self._interceptor = _LoggingClientInterceptor()
+ self._logged_channel = grpc.intercept_channel(
+ self._grpc_channel, self._interceptor
+ )
+
+ # Wrap messages. This must be done after self._logged_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "spanner.googleapis.com",
- credentials: ga_credentials.Credentials = None,
- credentials_file: str = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
@@ -234,8 +336,7 @@ def create_channel(
@property
def grpc_channel(self) -> grpc.Channel:
- """Return the channel designed to connect to this service.
- """
+ """Return the channel designed to connect to this service."""
return self._grpc_channel
@property
@@ -245,9 +346,11 @@ def operations_client(self) -> operations_v1.OperationsClient:
This property caches on the instance; repeated calls return the same
client.
"""
- # Sanity check: Only create a new client if we do not already have one.
+ # Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
- self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
+ self._operations_client = operations_v1.OperationsClient(
+ self._logged_channel
+ )
# Return the client from cache.
return self._operations_client
@@ -274,7 +377,7 @@ def list_databases(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_databases" not in self._stubs:
- self._stubs["list_databases"] = self.grpc_channel.unary_unary(
+ self._stubs["list_databases"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases",
request_serializer=spanner_database_admin.ListDatabasesRequest.serialize,
response_deserializer=spanner_database_admin.ListDatabasesResponse.deserialize,
@@ -311,7 +414,7 @@ def create_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_database" not in self._stubs:
- self._stubs["create_database"] = self.grpc_channel.unary_unary(
+ self._stubs["create_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase",
request_serializer=spanner_database_admin.CreateDatabaseRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
@@ -339,13 +442,78 @@ def get_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_database" not in self._stubs:
- self._stubs["get_database"] = self.grpc_channel.unary_unary(
+ self._stubs["get_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase",
request_serializer=spanner_database_admin.GetDatabaseRequest.serialize,
response_deserializer=spanner_database_admin.Database.deserialize,
)
return self._stubs["get_database"]
+ @property
+ def update_database(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.UpdateDatabaseRequest], operations_pb2.Operation
+ ]:
+ r"""Return a callable for the update database method over gRPC.
+
+ Updates a Cloud Spanner database. The returned [long-running
+ operation][google.longrunning.Operation] can be used to track
+ the progress of updating the database. If the named database
+ does not exist, returns ``NOT_FOUND``.
+
+ While the operation is pending:
+
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field is set to true.
+ - Cancelling the operation is best-effort. If the cancellation
+ succeeds, the operation metadata's
+ [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
+ is set, the updates are reverted, and the operation terminates
+ with a ``CANCELLED`` status.
+ - New UpdateDatabase requests will return a
+ ``FAILED_PRECONDITION`` error until the pending operation is
+ done (returns successfully or with error).
+ - Reading the database via the API continues to give the
+ pre-request values.
+
+ Upon completion of the returned operation:
+
+ - The new values are in effect and readable via the API.
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field becomes false.
+
+ The returned [long-running
+ operation][google.longrunning.Operation] will have a name of the
+ format
+ ``projects//instances//databases//operations/``
+ and can be used to track the database modification. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Database][google.spanner.admin.database.v1.Database], if
+ successful.
+
+ Returns:
+ Callable[[~.UpdateDatabaseRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_database" not in self._stubs:
+ self._stubs["update_database"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabase",
+ request_serializer=spanner_database_admin.UpdateDatabaseRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["update_database"]
+
@property
def update_database_ddl(
self,
@@ -375,7 +543,7 @@ def update_database_ddl(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_database_ddl" not in self._stubs:
- self._stubs["update_database_ddl"] = self.grpc_channel.unary_unary(
+ self._stubs["update_database_ddl"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl",
request_serializer=spanner_database_admin.UpdateDatabaseDdlRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
@@ -390,7 +558,8 @@ def drop_database(
Drops (aka deletes) a Cloud Spanner database. Completed backups
for the database will be retained according to their
- ``expire_time``.
+ ``expire_time``. Note: Cloud Spanner might continue to accept
+ requests for a few seconds after the database has been deleted.
Returns:
Callable[[~.DropDatabaseRequest],
@@ -403,7 +572,7 @@ def drop_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "drop_database" not in self._stubs:
- self._stubs["drop_database"] = self.grpc_channel.unary_unary(
+ self._stubs["drop_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase",
request_serializer=spanner_database_admin.DropDatabaseRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
@@ -435,7 +604,7 @@ def get_database_ddl(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_database_ddl" not in self._stubs:
- self._stubs["get_database_ddl"] = self.grpc_channel.unary_unary(
+ self._stubs["get_database_ddl"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl",
request_serializer=spanner_database_admin.GetDatabaseDdlRequest.serialize,
response_deserializer=spanner_database_admin.GetDatabaseDdlResponse.deserialize,
@@ -469,7 +638,7 @@ def set_iam_policy(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_iam_policy" not in self._stubs:
- self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
+ self._stubs["set_iam_policy"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy",
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
@@ -504,7 +673,7 @@ def get_iam_policy(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_iam_policy" not in self._stubs:
- self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
+ self._stubs["get_iam_policy"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy",
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
@@ -542,7 +711,7 @@ def test_iam_permissions(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "test_iam_permissions" not in self._stubs:
- self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
+ self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions",
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
@@ -580,13 +749,51 @@ def create_backup(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_backup" not in self._stubs:
- self._stubs["create_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["create_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup",
request_serializer=gsad_backup.CreateBackupRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_backup"]
+ @property
+ def copy_backup(
+ self,
+ ) -> Callable[[backup.CopyBackupRequest], operations_pb2.Operation]:
+ r"""Return a callable for the copy backup method over gRPC.
+
+ Starts copying a Cloud Spanner Backup. The returned backup
+ [long-running operation][google.longrunning.Operation] will have
+ a name of the format
+ ``projects//instances//backups//operations/``
+ and can be used to track copying of the backup. The operation is
+ associated with the destination backup. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Backup][google.spanner.admin.database.v1.Backup], if
+ successful. Cancelling the returned operation will stop the
+ copying and delete the destination backup. Concurrent CopyBackup
+ requests can run on the same source backup.
+
+ Returns:
+ Callable[[~.CopyBackupRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "copy_backup" not in self._stubs:
+ self._stubs["copy_backup"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/CopyBackup",
+ request_serializer=backup.CopyBackupRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["copy_backup"]
+
@property
def get_backup(self) -> Callable[[backup.GetBackupRequest], backup.Backup]:
r"""Return a callable for the get backup method over gRPC.
@@ -605,7 +812,7 @@ def get_backup(self) -> Callable[[backup.GetBackupRequest], backup.Backup]:
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_backup" not in self._stubs:
- self._stubs["get_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["get_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetBackup",
request_serializer=backup.GetBackupRequest.serialize,
response_deserializer=backup.Backup.deserialize,
@@ -632,7 +839,7 @@ def update_backup(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_backup" not in self._stubs:
- self._stubs["update_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["update_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup",
request_serializer=gsad_backup.UpdateBackupRequest.serialize,
response_deserializer=gsad_backup.Backup.deserialize,
@@ -657,7 +864,7 @@ def delete_backup(self) -> Callable[[backup.DeleteBackupRequest], empty_pb2.Empt
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_backup" not in self._stubs:
- self._stubs["delete_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["delete_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup",
request_serializer=backup.DeleteBackupRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
@@ -685,7 +892,7 @@ def list_backups(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_backups" not in self._stubs:
- self._stubs["list_backups"] = self.grpc_channel.unary_unary(
+ self._stubs["list_backups"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListBackups",
request_serializer=backup.ListBackupsRequest.serialize,
response_deserializer=backup.ListBackupsResponse.deserialize,
@@ -731,7 +938,7 @@ def restore_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "restore_database" not in self._stubs:
- self._stubs["restore_database"] = self.grpc_channel.unary_unary(
+ self._stubs["restore_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase",
request_serializer=spanner_database_admin.RestoreDatabaseRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
@@ -769,7 +976,7 @@ def list_database_operations(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_database_operations" not in self._stubs:
- self._stubs["list_database_operations"] = self.grpc_channel.unary_unary(
+ self._stubs["list_database_operations"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations",
request_serializer=spanner_database_admin.ListDatabaseOperationsRequest.serialize,
response_deserializer=spanner_database_admin.ListDatabaseOperationsResponse.deserialize,
@@ -808,12 +1015,322 @@ def list_backup_operations(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_backup_operations" not in self._stubs:
- self._stubs["list_backup_operations"] = self.grpc_channel.unary_unary(
+ self._stubs["list_backup_operations"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations",
request_serializer=backup.ListBackupOperationsRequest.serialize,
response_deserializer=backup.ListBackupOperationsResponse.deserialize,
)
return self._stubs["list_backup_operations"]
+ @property
+ def list_database_roles(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.ListDatabaseRolesRequest],
+ spanner_database_admin.ListDatabaseRolesResponse,
+ ]:
+ r"""Return a callable for the list database roles method over gRPC.
+
+ Lists Cloud Spanner database roles.
+
+ Returns:
+ Callable[[~.ListDatabaseRolesRequest],
+ ~.ListDatabaseRolesResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_database_roles" not in self._stubs:
+ self._stubs["list_database_roles"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseRoles",
+ request_serializer=spanner_database_admin.ListDatabaseRolesRequest.serialize,
+ response_deserializer=spanner_database_admin.ListDatabaseRolesResponse.deserialize,
+ )
+ return self._stubs["list_database_roles"]
+
+ @property
+ def add_split_points(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.AddSplitPointsRequest],
+ spanner_database_admin.AddSplitPointsResponse,
+ ]:
+ r"""Return a callable for the add split points method over gRPC.
+
+ Adds split points to specified tables, indexes of a
+ database.
+
+ Returns:
+ Callable[[~.AddSplitPointsRequest],
+ ~.AddSplitPointsResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "add_split_points" not in self._stubs:
+ self._stubs["add_split_points"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/AddSplitPoints",
+ request_serializer=spanner_database_admin.AddSplitPointsRequest.serialize,
+ response_deserializer=spanner_database_admin.AddSplitPointsResponse.deserialize,
+ )
+ return self._stubs["add_split_points"]
+
+ @property
+ def create_backup_schedule(
+ self,
+ ) -> Callable[
+ [gsad_backup_schedule.CreateBackupScheduleRequest],
+ gsad_backup_schedule.BackupSchedule,
+ ]:
+ r"""Return a callable for the create backup schedule method over gRPC.
+
+ Creates a new backup schedule.
+
+ Returns:
+ Callable[[~.CreateBackupScheduleRequest],
+ ~.BackupSchedule]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_backup_schedule" not in self._stubs:
+ self._stubs["create_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackupSchedule",
+ request_serializer=gsad_backup_schedule.CreateBackupScheduleRequest.serialize,
+ response_deserializer=gsad_backup_schedule.BackupSchedule.deserialize,
+ )
+ return self._stubs["create_backup_schedule"]
+
+ @property
+ def get_backup_schedule(
+ self,
+ ) -> Callable[
+ [backup_schedule.GetBackupScheduleRequest], backup_schedule.BackupSchedule
+ ]:
+ r"""Return a callable for the get backup schedule method over gRPC.
+
+ Gets backup schedule for the input schedule name.
+
+ Returns:
+ Callable[[~.GetBackupScheduleRequest],
+ ~.BackupSchedule]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_backup_schedule" not in self._stubs:
+ self._stubs["get_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackupSchedule",
+ request_serializer=backup_schedule.GetBackupScheduleRequest.serialize,
+ response_deserializer=backup_schedule.BackupSchedule.deserialize,
+ )
+ return self._stubs["get_backup_schedule"]
+
+ @property
+ def update_backup_schedule(
+ self,
+ ) -> Callable[
+ [gsad_backup_schedule.UpdateBackupScheduleRequest],
+ gsad_backup_schedule.BackupSchedule,
+ ]:
+ r"""Return a callable for the update backup schedule method over gRPC.
+
+ Updates a backup schedule.
+
+ Returns:
+ Callable[[~.UpdateBackupScheduleRequest],
+ ~.BackupSchedule]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_backup_schedule" not in self._stubs:
+ self._stubs["update_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackupSchedule",
+ request_serializer=gsad_backup_schedule.UpdateBackupScheduleRequest.serialize,
+ response_deserializer=gsad_backup_schedule.BackupSchedule.deserialize,
+ )
+ return self._stubs["update_backup_schedule"]
+
+ @property
+ def delete_backup_schedule(
+ self,
+ ) -> Callable[[backup_schedule.DeleteBackupScheduleRequest], empty_pb2.Empty]:
+ r"""Return a callable for the delete backup schedule method over gRPC.
+
+ Deletes a backup schedule.
+
+ Returns:
+ Callable[[~.DeleteBackupScheduleRequest],
+ ~.Empty]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_backup_schedule" not in self._stubs:
+ self._stubs["delete_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackupSchedule",
+ request_serializer=backup_schedule.DeleteBackupScheduleRequest.serialize,
+ response_deserializer=empty_pb2.Empty.FromString,
+ )
+ return self._stubs["delete_backup_schedule"]
+
+ @property
+ def list_backup_schedules(
+ self,
+ ) -> Callable[
+ [backup_schedule.ListBackupSchedulesRequest],
+ backup_schedule.ListBackupSchedulesResponse,
+ ]:
+ r"""Return a callable for the list backup schedules method over gRPC.
+
+ Lists all the backup schedules for the database.
+
+ Returns:
+ Callable[[~.ListBackupSchedulesRequest],
+ ~.ListBackupSchedulesResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_backup_schedules" not in self._stubs:
+ self._stubs["list_backup_schedules"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupSchedules",
+ request_serializer=backup_schedule.ListBackupSchedulesRequest.serialize,
+ response_deserializer=backup_schedule.ListBackupSchedulesResponse.deserialize,
+ )
+ return self._stubs["list_backup_schedules"]
+
+ @property
+ def internal_update_graph_operation(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.InternalUpdateGraphOperationRequest],
+ spanner_database_admin.InternalUpdateGraphOperationResponse,
+ ]:
+ r"""Return a callable for the internal update graph
+ operation method over gRPC.
+
+ This is an internal API called by Spanner Graph jobs.
+ You should never need to call this API directly.
+
+ Returns:
+ Callable[[~.InternalUpdateGraphOperationRequest],
+ ~.InternalUpdateGraphOperationResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "internal_update_graph_operation" not in self._stubs:
+ self._stubs[
+ "internal_update_graph_operation"
+ ] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/InternalUpdateGraphOperation",
+ request_serializer=spanner_database_admin.InternalUpdateGraphOperationRequest.serialize,
+ response_deserializer=spanner_database_admin.InternalUpdateGraphOperationResponse.deserialize,
+ )
+ return self._stubs["internal_update_graph_operation"]
+
+ def close(self):
+ self._logged_channel.close()
+
+ @property
+ def delete_operation(
+ self,
+ ) -> Callable[[operations_pb2.DeleteOperationRequest], None]:
+ r"""Return a callable for the delete_operation method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_operation" not in self._stubs:
+ self._stubs["delete_operation"] = self._logged_channel.unary_unary(
+ "/google.longrunning.Operations/DeleteOperation",
+ request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
+ response_deserializer=None,
+ )
+ return self._stubs["delete_operation"]
+
+ @property
+ def cancel_operation(
+ self,
+ ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
+ r"""Return a callable for the cancel_operation method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "cancel_operation" not in self._stubs:
+ self._stubs["cancel_operation"] = self._logged_channel.unary_unary(
+ "/google.longrunning.Operations/CancelOperation",
+ request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+ response_deserializer=None,
+ )
+ return self._stubs["cancel_operation"]
+
+ @property
+ def get_operation(
+ self,
+ ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+ r"""Return a callable for the get_operation method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_operation" not in self._stubs:
+ self._stubs["get_operation"] = self._logged_channel.unary_unary(
+ "/google.longrunning.Operations/GetOperation",
+ request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["get_operation"]
+
+ @property
+ def list_operations(
+ self,
+ ) -> Callable[
+ [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse
+ ]:
+ r"""Return a callable for the list_operations method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_operations" not in self._stubs:
+ self._stubs["list_operations"] = self._logged_channel.unary_unary(
+ "/google.longrunning.Operations/ListOperations",
+ request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
+ response_deserializer=operations_pb2.ListOperationsResponse.FromString,
+ )
+ return self._stubs["list_operations"]
+
+ @property
+ def kind(self) -> str:
+ return "grpc"
+
__all__ = ("DatabaseAdminGrpcTransport",)
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py
index 49832746ea..5171d84d40 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,21 +13,33 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+import inspect
+import json
+import pickle
+import logging as std_logging
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import grpc_helpers_async # type: ignore
-from google.api_core import operations_v1 # type: ignore
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers_async
+from google.api_core import exceptions as core_exceptions
+from google.api_core import retry_async as retries
+from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
-import packaging.version
+from google.protobuf.json_format import MessageToJson
+import google.protobuf.message
import grpc # type: ignore
+import proto # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as gsad_backup_schedule,
+)
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
@@ -36,16 +48,94 @@
from .base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO
from .grpc import DatabaseAdminGrpcTransport
+try:
+ from google.api_core import client_logging # type: ignore
+
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = std_logging.getLogger(__name__)
+
+
+class _LoggingClientAIOInterceptor(
+ grpc.aio.UnaryUnaryClientInterceptor
+): # pragma: NO COVER
+ async def intercept_unary_unary(self, continuation, client_call_details, request):
+ logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ std_logging.DEBUG
+ )
+ if logging_enabled: # pragma: NO COVER
+ request_metadata = client_call_details.metadata
+ if isinstance(request, proto.Message):
+ request_payload = type(request).to_json(request)
+ elif isinstance(request, google.protobuf.message.Message):
+ request_payload = MessageToJson(request)
+ else:
+ request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"
+
+ request_metadata = {
+ key: value.decode("utf-8") if isinstance(value, bytes) else value
+ for key, value in request_metadata
+ }
+ grpc_request = {
+ "payload": request_payload,
+ "requestMethod": "grpc",
+ "metadata": dict(request_metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for {client_call_details.method}",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": str(client_call_details.method),
+ "request": grpc_request,
+ "metadata": grpc_request["metadata"],
+ },
+ )
+ response = await continuation(client_call_details, request)
+ if logging_enabled: # pragma: NO COVER
+ response_metadata = await response.trailing_metadata()
+            # Convert gRPC trailing metadata to a dict of str values (None if empty)
+ metadata = (
+ dict([(k, str(v)) for k, v in response_metadata])
+ if response_metadata
+ else None
+ )
+ result = await response
+ if isinstance(result, proto.Message):
+ response_payload = type(result).to_json(result)
+ elif isinstance(result, google.protobuf.message.Message):
+ response_payload = MessageToJson(result)
+ else:
+ response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
+ grpc_response = {
+ "payload": response_payload,
+ "metadata": metadata,
+ "status": "OK",
+ }
+ _LOGGER.debug(
+ f"Received response to rpc {client_call_details.method}.",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": str(client_call_details.method),
+ "response": grpc_response,
+ "metadata": grpc_response["metadata"],
+ },
+ )
+ return response
+
class DatabaseAdminGrpcAsyncIOTransport(DatabaseAdminTransport):
"""gRPC AsyncIO backend transport for DatabaseAdmin.
Cloud Spanner Database Admin API
- The Cloud Spanner Database Admin API can be used to create,
- drop, and list databases. It also enables updating the schema of
- pre-existing databases. It can be also used to create, delete
- and list backups for a database and to restore from an existing
- backup.
+
+ The Cloud Spanner Database Admin API can be used to:
+
+ - create, drop, and list databases
+ - update the schema of pre-existing databases
+ - create, delete, copy and list backups for a database
+ - restore a database from an existing backup
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
@@ -62,7 +152,7 @@ class DatabaseAdminGrpcAsyncIOTransport(DatabaseAdminTransport):
def create_channel(
cls,
host: str = "spanner.googleapis.com",
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
@@ -78,7 +168,6 @@ def create_channel(
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
@@ -105,51 +194,55 @@ def __init__(
self,
*,
host: str = "spanner.googleapis.com",
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
- channel: aio.Channel = None,
- api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
- ssl_channel_credentials: grpc.ChannelCredentials = None,
- client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
- quota_project_id=None,
+ channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None,
+ api_mtls_endpoint: Optional[str] = None,
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
+ api_audience: Optional[str] = None,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
- The hostname to connect to.
+ The hostname to connect to (default: 'spanner.googleapis.com').
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- This argument is ignored if ``channel`` is provided.
+ This argument is ignored if a ``channel`` instance is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is ignored if ``channel`` is provided.
+ This argument is ignored if a ``channel`` instance is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
- channel (Optional[aio.Channel]): A ``Channel`` instance through
- which to make calls.
+ channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]):
+ A ``Channel`` instance through which to make calls, or a Callable
+ that constructs and returns one. If set to None, ``self.create_channel``
+ is used to create the channel. If a Callable is given, it will be called
+ with the same arguments as used in ``self.create_channel``.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
- ``client_cert_source`` or applicatin default SSL credentials.
+ ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
- for grpc channel. It is ignored if ``channel`` is provided.
+ for the grpc channel. It is ignored if a ``channel`` instance is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
- both in PEM format. It is used to configure mutual TLS channel. It is
- ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ both in PEM format. It is used to configure a mutual TLS channel. It is
+ ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
@@ -169,16 +262,17 @@ def __init__(
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
- self._operations_client = None
+ self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
- if channel:
+ if isinstance(channel, aio.Channel):
# Ignore credentials if a channel was passed.
- credentials = False
+ credentials = None
+ self._ignore_credentials = True
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
@@ -212,13 +306,19 @@ def __init__(
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
+ api_audience=api_audience,
)
if not self._grpc_channel:
- self._grpc_channel = type(self).create_channel(
+ # initialize with the provided callable or the default channel
+ channel_init = channel or type(self).create_channel
+ self._grpc_channel = channel_init(
self._host,
+ # use the credentials which are saved
credentials=self._credentials,
- credentials_file=credentials_file,
+ # Set ``credentials_file`` to ``None`` here as
+ # the credentials that we saved earlier should be used.
+ credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
@@ -228,7 +328,13 @@ def __init__(
],
)
- # Wrap messages. This must be done after self._grpc_channel exists
+ self._interceptor = _LoggingClientAIOInterceptor()
+ self._grpc_channel._unary_unary_interceptors.append(self._interceptor)
+ self._logged_channel = self._grpc_channel
+ self._wrap_with_kind = (
+ "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters
+ )
+ # Wrap messages. This must be done after self._logged_channel exists
self._prep_wrapped_messages(client_info)
@property
@@ -248,10 +354,10 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient:
This property caches on the instance; repeated calls return the same
client.
"""
- # Sanity check: Only create a new client if we do not already have one.
+ # Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
- self.grpc_channel
+ self._logged_channel
)
# Return the client from cache.
@@ -279,7 +385,7 @@ def list_databases(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_databases" not in self._stubs:
- self._stubs["list_databases"] = self.grpc_channel.unary_unary(
+ self._stubs["list_databases"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases",
request_serializer=spanner_database_admin.ListDatabasesRequest.serialize,
response_deserializer=spanner_database_admin.ListDatabasesResponse.deserialize,
@@ -317,7 +423,7 @@ def create_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_database" not in self._stubs:
- self._stubs["create_database"] = self.grpc_channel.unary_unary(
+ self._stubs["create_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase",
request_serializer=spanner_database_admin.CreateDatabaseRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
@@ -346,13 +452,79 @@ def get_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_database" not in self._stubs:
- self._stubs["get_database"] = self.grpc_channel.unary_unary(
+ self._stubs["get_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase",
request_serializer=spanner_database_admin.GetDatabaseRequest.serialize,
response_deserializer=spanner_database_admin.Database.deserialize,
)
return self._stubs["get_database"]
+ @property
+ def update_database(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.UpdateDatabaseRequest],
+ Awaitable[operations_pb2.Operation],
+ ]:
+ r"""Return a callable for the update database method over gRPC.
+
+ Updates a Cloud Spanner database. The returned [long-running
+ operation][google.longrunning.Operation] can be used to track
+ the progress of updating the database. If the named database
+ does not exist, returns ``NOT_FOUND``.
+
+ While the operation is pending:
+
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field is set to true.
+ - Cancelling the operation is best-effort. If the cancellation
+ succeeds, the operation metadata's
+ [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
+ is set, the updates are reverted, and the operation terminates
+ with a ``CANCELLED`` status.
+ - New UpdateDatabase requests will return a
+ ``FAILED_PRECONDITION`` error until the pending operation is
+ done (returns successfully or with error).
+ - Reading the database via the API continues to give the
+ pre-request values.
+
+ Upon completion of the returned operation:
+
+ - The new values are in effect and readable via the API.
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field becomes false.
+
+ The returned [long-running
+ operation][google.longrunning.Operation] will have a name of the
+ format
+         ``projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>``
+ and can be used to track the database modification. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Database][google.spanner.admin.database.v1.Database], if
+ successful.
+
+ Returns:
+ Callable[[~.UpdateDatabaseRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_database" not in self._stubs:
+ self._stubs["update_database"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabase",
+ request_serializer=spanner_database_admin.UpdateDatabaseRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["update_database"]
+
@property
def update_database_ddl(
self,
@@ -383,7 +555,7 @@ def update_database_ddl(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_database_ddl" not in self._stubs:
- self._stubs["update_database_ddl"] = self.grpc_channel.unary_unary(
+ self._stubs["update_database_ddl"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl",
request_serializer=spanner_database_admin.UpdateDatabaseDdlRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
@@ -400,7 +572,8 @@ def drop_database(
Drops (aka deletes) a Cloud Spanner database. Completed backups
for the database will be retained according to their
- ``expire_time``.
+ ``expire_time``. Note: Cloud Spanner might continue to accept
+ requests for a few seconds after the database has been deleted.
Returns:
Callable[[~.DropDatabaseRequest],
@@ -413,7 +586,7 @@ def drop_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "drop_database" not in self._stubs:
- self._stubs["drop_database"] = self.grpc_channel.unary_unary(
+ self._stubs["drop_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase",
request_serializer=spanner_database_admin.DropDatabaseRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
@@ -445,7 +618,7 @@ def get_database_ddl(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_database_ddl" not in self._stubs:
- self._stubs["get_database_ddl"] = self.grpc_channel.unary_unary(
+ self._stubs["get_database_ddl"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl",
request_serializer=spanner_database_admin.GetDatabaseDdlRequest.serialize,
response_deserializer=spanner_database_admin.GetDatabaseDdlResponse.deserialize,
@@ -479,7 +652,7 @@ def set_iam_policy(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_iam_policy" not in self._stubs:
- self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
+ self._stubs["set_iam_policy"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy",
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
@@ -514,7 +687,7 @@ def get_iam_policy(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_iam_policy" not in self._stubs:
- self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
+ self._stubs["get_iam_policy"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy",
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
@@ -552,7 +725,7 @@ def test_iam_permissions(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "test_iam_permissions" not in self._stubs:
- self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
+ self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions",
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
@@ -592,13 +765,51 @@ def create_backup(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_backup" not in self._stubs:
- self._stubs["create_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["create_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup",
request_serializer=gsad_backup.CreateBackupRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_backup"]
+ @property
+ def copy_backup(
+ self,
+ ) -> Callable[[backup.CopyBackupRequest], Awaitable[operations_pb2.Operation]]:
+ r"""Return a callable for the copy backup method over gRPC.
+
+ Starts copying a Cloud Spanner Backup. The returned backup
+ [long-running operation][google.longrunning.Operation] will have
+ a name of the format
+         ``projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>``
+ and can be used to track copying of the backup. The operation is
+ associated with the destination backup. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Backup][google.spanner.admin.database.v1.Backup], if
+ successful. Cancelling the returned operation will stop the
+ copying and delete the destination backup. Concurrent CopyBackup
+ requests can run on the same source backup.
+
+ Returns:
+ Callable[[~.CopyBackupRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "copy_backup" not in self._stubs:
+ self._stubs["copy_backup"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/CopyBackup",
+ request_serializer=backup.CopyBackupRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["copy_backup"]
+
@property
def get_backup(
self,
@@ -619,7 +830,7 @@ def get_backup(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_backup" not in self._stubs:
- self._stubs["get_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["get_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetBackup",
request_serializer=backup.GetBackupRequest.serialize,
response_deserializer=backup.Backup.deserialize,
@@ -646,7 +857,7 @@ def update_backup(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_backup" not in self._stubs:
- self._stubs["update_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["update_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup",
request_serializer=gsad_backup.UpdateBackupRequest.serialize,
response_deserializer=gsad_backup.Backup.deserialize,
@@ -673,7 +884,7 @@ def delete_backup(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_backup" not in self._stubs:
- self._stubs["delete_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["delete_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup",
request_serializer=backup.DeleteBackupRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
@@ -701,7 +912,7 @@ def list_backups(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_backups" not in self._stubs:
- self._stubs["list_backups"] = self.grpc_channel.unary_unary(
+ self._stubs["list_backups"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListBackups",
request_serializer=backup.ListBackupsRequest.serialize,
response_deserializer=backup.ListBackupsResponse.deserialize,
@@ -748,7 +959,7 @@ def restore_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "restore_database" not in self._stubs:
- self._stubs["restore_database"] = self.grpc_channel.unary_unary(
+ self._stubs["restore_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase",
request_serializer=spanner_database_admin.RestoreDatabaseRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
@@ -786,7 +997,7 @@ def list_database_operations(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_database_operations" not in self._stubs:
- self._stubs["list_database_operations"] = self.grpc_channel.unary_unary(
+ self._stubs["list_database_operations"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations",
request_serializer=spanner_database_admin.ListDatabaseOperationsRequest.serialize,
response_deserializer=spanner_database_admin.ListDatabaseOperationsResponse.deserialize,
@@ -826,12 +1037,690 @@ def list_backup_operations(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_backup_operations" not in self._stubs:
- self._stubs["list_backup_operations"] = self.grpc_channel.unary_unary(
+ self._stubs["list_backup_operations"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations",
request_serializer=backup.ListBackupOperationsRequest.serialize,
response_deserializer=backup.ListBackupOperationsResponse.deserialize,
)
return self._stubs["list_backup_operations"]
+ @property
+ def list_database_roles(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.ListDatabaseRolesRequest],
+ Awaitable[spanner_database_admin.ListDatabaseRolesResponse],
+ ]:
+ r"""Return a callable for the list database roles method over gRPC.
+
+ Lists Cloud Spanner database roles.
+
+ Returns:
+ Callable[[~.ListDatabaseRolesRequest],
+ Awaitable[~.ListDatabaseRolesResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_database_roles" not in self._stubs:
+ self._stubs["list_database_roles"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseRoles",
+ request_serializer=spanner_database_admin.ListDatabaseRolesRequest.serialize,
+ response_deserializer=spanner_database_admin.ListDatabaseRolesResponse.deserialize,
+ )
+ return self._stubs["list_database_roles"]
+
+ @property
+ def add_split_points(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.AddSplitPointsRequest],
+ Awaitable[spanner_database_admin.AddSplitPointsResponse],
+ ]:
+ r"""Return a callable for the add split points method over gRPC.
+
+ Adds split points to specified tables, indexes of a
+ database.
+
+ Returns:
+ Callable[[~.AddSplitPointsRequest],
+ Awaitable[~.AddSplitPointsResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "add_split_points" not in self._stubs:
+ self._stubs["add_split_points"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/AddSplitPoints",
+ request_serializer=spanner_database_admin.AddSplitPointsRequest.serialize,
+ response_deserializer=spanner_database_admin.AddSplitPointsResponse.deserialize,
+ )
+ return self._stubs["add_split_points"]
+
+ @property
+ def create_backup_schedule(
+ self,
+ ) -> Callable[
+ [gsad_backup_schedule.CreateBackupScheduleRequest],
+ Awaitable[gsad_backup_schedule.BackupSchedule],
+ ]:
+ r"""Return a callable for the create backup schedule method over gRPC.
+
+ Creates a new backup schedule.
+
+ Returns:
+ Callable[[~.CreateBackupScheduleRequest],
+ Awaitable[~.BackupSchedule]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_backup_schedule" not in self._stubs:
+ self._stubs["create_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackupSchedule",
+ request_serializer=gsad_backup_schedule.CreateBackupScheduleRequest.serialize,
+ response_deserializer=gsad_backup_schedule.BackupSchedule.deserialize,
+ )
+ return self._stubs["create_backup_schedule"]
+
+ @property
+ def get_backup_schedule(
+ self,
+ ) -> Callable[
+ [backup_schedule.GetBackupScheduleRequest],
+ Awaitable[backup_schedule.BackupSchedule],
+ ]:
+ r"""Return a callable for the get backup schedule method over gRPC.
+
+ Gets backup schedule for the input schedule name.
+
+ Returns:
+ Callable[[~.GetBackupScheduleRequest],
+ Awaitable[~.BackupSchedule]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_backup_schedule" not in self._stubs:
+ self._stubs["get_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackupSchedule",
+ request_serializer=backup_schedule.GetBackupScheduleRequest.serialize,
+ response_deserializer=backup_schedule.BackupSchedule.deserialize,
+ )
+ return self._stubs["get_backup_schedule"]
+
+ @property
+ def update_backup_schedule(
+ self,
+ ) -> Callable[
+ [gsad_backup_schedule.UpdateBackupScheduleRequest],
+ Awaitable[gsad_backup_schedule.BackupSchedule],
+ ]:
+ r"""Return a callable for the update backup schedule method over gRPC.
+
+ Updates a backup schedule.
+
+ Returns:
+ Callable[[~.UpdateBackupScheduleRequest],
+ Awaitable[~.BackupSchedule]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_backup_schedule" not in self._stubs:
+ self._stubs["update_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackupSchedule",
+ request_serializer=gsad_backup_schedule.UpdateBackupScheduleRequest.serialize,
+ response_deserializer=gsad_backup_schedule.BackupSchedule.deserialize,
+ )
+ return self._stubs["update_backup_schedule"]
+
+ @property
+ def delete_backup_schedule(
+ self,
+ ) -> Callable[
+ [backup_schedule.DeleteBackupScheduleRequest], Awaitable[empty_pb2.Empty]
+ ]:
+ r"""Return a callable for the delete backup schedule method over gRPC.
+
+ Deletes a backup schedule.
+
+ Returns:
+ Callable[[~.DeleteBackupScheduleRequest],
+ Awaitable[~.Empty]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_backup_schedule" not in self._stubs:
+ self._stubs["delete_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackupSchedule",
+ request_serializer=backup_schedule.DeleteBackupScheduleRequest.serialize,
+ response_deserializer=empty_pb2.Empty.FromString,
+ )
+ return self._stubs["delete_backup_schedule"]
+
+ @property
+ def list_backup_schedules(
+ self,
+ ) -> Callable[
+ [backup_schedule.ListBackupSchedulesRequest],
+ Awaitable[backup_schedule.ListBackupSchedulesResponse],
+ ]:
+ r"""Return a callable for the list backup schedules method over gRPC.
+
+ Lists all the backup schedules for the database.
+
+ Returns:
+ Callable[[~.ListBackupSchedulesRequest],
+ Awaitable[~.ListBackupSchedulesResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_backup_schedules" not in self._stubs:
+ self._stubs["list_backup_schedules"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupSchedules",
+ request_serializer=backup_schedule.ListBackupSchedulesRequest.serialize,
+ response_deserializer=backup_schedule.ListBackupSchedulesResponse.deserialize,
+ )
+ return self._stubs["list_backup_schedules"]
+
+ @property
+ def internal_update_graph_operation(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.InternalUpdateGraphOperationRequest],
+ Awaitable[spanner_database_admin.InternalUpdateGraphOperationResponse],
+ ]:
+ r"""Return a callable for the internal update graph
+ operation method over gRPC.
+
+ This is an internal API called by Spanner Graph jobs.
+ You should never need to call this API directly.
+
+ Returns:
+ Callable[[~.InternalUpdateGraphOperationRequest],
+ Awaitable[~.InternalUpdateGraphOperationResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "internal_update_graph_operation" not in self._stubs:
+ self._stubs[
+ "internal_update_graph_operation"
+ ] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/InternalUpdateGraphOperation",
+ request_serializer=spanner_database_admin.InternalUpdateGraphOperationRequest.serialize,
+ response_deserializer=spanner_database_admin.InternalUpdateGraphOperationResponse.deserialize,
+ )
+ return self._stubs["internal_update_graph_operation"]
+
+ def _prep_wrapped_messages(self, client_info):
+ """Precompute the wrapped methods, overriding the base class method to use async wrappers."""
+ self._wrapped_methods = {
+ self.list_databases: self._wrap_method(
+ self.list_databases,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.create_database: self._wrap_method(
+ self.create_database,
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.get_database: self._wrap_method(
+ self.get_database,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.update_database: self._wrap_method(
+ self.update_database,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.update_database_ddl: self._wrap_method(
+ self.update_database_ddl,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.drop_database: self._wrap_method(
+ self.drop_database,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.get_database_ddl: self._wrap_method(
+ self.get_database_ddl,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.set_iam_policy: self._wrap_method(
+ self.set_iam_policy,
+ default_timeout=30.0,
+ client_info=client_info,
+ ),
+ self.get_iam_policy: self._wrap_method(
+ self.get_iam_policy,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=30.0,
+ ),
+ default_timeout=30.0,
+ client_info=client_info,
+ ),
+ self.test_iam_permissions: self._wrap_method(
+ self.test_iam_permissions,
+ default_timeout=30.0,
+ client_info=client_info,
+ ),
+ self.create_backup: self._wrap_method(
+ self.create_backup,
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.copy_backup: self._wrap_method(
+ self.copy_backup,
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.get_backup: self._wrap_method(
+ self.get_backup,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.update_backup: self._wrap_method(
+ self.update_backup,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.delete_backup: self._wrap_method(
+ self.delete_backup,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.list_backups: self._wrap_method(
+ self.list_backups,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.restore_database: self._wrap_method(
+ self.restore_database,
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.list_database_operations: self._wrap_method(
+ self.list_database_operations,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.list_backup_operations: self._wrap_method(
+ self.list_backup_operations,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.list_database_roles: self._wrap_method(
+ self.list_database_roles,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.add_split_points: self._wrap_method(
+ self.add_split_points,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.create_backup_schedule: self._wrap_method(
+ self.create_backup_schedule,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.get_backup_schedule: self._wrap_method(
+ self.get_backup_schedule,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.update_backup_schedule: self._wrap_method(
+ self.update_backup_schedule,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.delete_backup_schedule: self._wrap_method(
+ self.delete_backup_schedule,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.list_backup_schedules: self._wrap_method(
+ self.list_backup_schedules,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.internal_update_graph_operation: self._wrap_method(
+ self.internal_update_graph_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.cancel_operation: self._wrap_method(
+ self.cancel_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.delete_operation: self._wrap_method(
+ self.delete_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.get_operation: self._wrap_method(
+ self.get_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.list_operations: self._wrap_method(
+ self.list_operations,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ }
+
+ def _wrap_method(self, func, *args, **kwargs):
+ if self._wrap_with_kind: # pragma: NO COVER
+ kwargs["kind"] = self.kind
+ return gapic_v1.method_async.wrap_method(func, *args, **kwargs)
+
+ def close(self):
+ return self._logged_channel.close()
+
+ @property
+ def kind(self) -> str:
+ return "grpc_asyncio"
+
+ @property
+ def delete_operation(
+ self,
+ ) -> Callable[[operations_pb2.DeleteOperationRequest], None]:
+ r"""Return a callable for the delete_operation method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_operation" not in self._stubs:
+ self._stubs["delete_operation"] = self._logged_channel.unary_unary(
+ "/google.longrunning.Operations/DeleteOperation",
+ request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
+ response_deserializer=None,
+ )
+ return self._stubs["delete_operation"]
+
+ @property
+ def cancel_operation(
+ self,
+ ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
+ r"""Return a callable for the cancel_operation method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "cancel_operation" not in self._stubs:
+ self._stubs["cancel_operation"] = self._logged_channel.unary_unary(
+ "/google.longrunning.Operations/CancelOperation",
+ request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+ response_deserializer=None,
+ )
+ return self._stubs["cancel_operation"]
+
+ @property
+ def get_operation(
+ self,
+ ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+ r"""Return a callable for the get_operation method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_operation" not in self._stubs:
+ self._stubs["get_operation"] = self._logged_channel.unary_unary(
+ "/google.longrunning.Operations/GetOperation",
+ request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["get_operation"]
+
+ @property
+ def list_operations(
+ self,
+ ) -> Callable[
+ [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse
+ ]:
+ r"""Return a callable for the list_operations method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_operations" not in self._stubs:
+ self._stubs["list_operations"] = self._logged_channel.unary_unary(
+ "/google.longrunning.Operations/ListOperations",
+ request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
+ response_deserializer=operations_pb2.ListOperationsResponse.FromString,
+ )
+ return self._stubs["list_operations"]
+
__all__ = ("DatabaseAdminGrpcAsyncIOTransport",)
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py
new file mode 100644
index 0000000000..df70fc5636
--- /dev/null
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py
@@ -0,0 +1,6550 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import logging
+import json # type: ignore
+
+from google.auth.transport.requests import AuthorizedSession # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
+from google.api_core import exceptions as core_exceptions
+from google.api_core import retry as retries
+from google.api_core import rest_helpers
+from google.api_core import rest_streaming
+from google.api_core import gapic_v1
+import google.protobuf
+
+from google.protobuf import json_format
+from google.api_core import operations_v1
+
+from requests import __version__ as requests_version
+import dataclasses
+from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
+import warnings
+
+
+from google.cloud.spanner_admin_database_v1.types import backup
+from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as gsad_backup_schedule,
+)
+from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.protobuf import empty_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+
+
+from .rest_base import _BaseDatabaseAdminRestTransport
+from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO
+
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
+
+try:
+ from google.api_core import client_logging # type: ignore
+
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = logging.getLogger(__name__)
+
+DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
+ grpc_version=None,
+ rest_version=f"requests@{requests_version}",
+)
+
+if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER
+ DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__
+
+
+class DatabaseAdminRestInterceptor:
+ """Interceptor for DatabaseAdmin.
+
+ Interceptors are used to manipulate requests, request metadata, and responses
+ in arbitrary ways.
+ Example use cases include:
+ * Logging
+ * Verifying requests according to service or custom semantics
+ * Stripping extraneous information from responses
+
+ These use cases and more can be enabled by injecting an
+ instance of a custom subclass when constructing the DatabaseAdminRestTransport.
+
+ .. code-block:: python
+ class MyCustomDatabaseAdminInterceptor(DatabaseAdminRestInterceptor):
+ def pre_add_split_points(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_add_split_points(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_copy_backup(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_copy_backup(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_create_backup(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_create_backup(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_create_backup_schedule(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_create_backup_schedule(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_create_database(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_create_database(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_delete_backup(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def pre_delete_backup_schedule(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def pre_drop_database(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def pre_get_backup(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_get_backup(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_get_backup_schedule(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_get_backup_schedule(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_get_database(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_get_database(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_get_database_ddl(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_get_database_ddl(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_get_iam_policy(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_get_iam_policy(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_internal_update_graph_operation(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_internal_update_graph_operation(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_list_backup_operations(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_list_backup_operations(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_list_backups(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_list_backups(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_list_backup_schedules(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_list_backup_schedules(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_list_database_operations(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_list_database_operations(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_list_database_roles(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_list_database_roles(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_list_databases(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_list_databases(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_restore_database(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_restore_database(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_set_iam_policy(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_set_iam_policy(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_test_iam_permissions(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_test_iam_permissions(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_update_backup(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_update_backup(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_update_backup_schedule(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_update_backup_schedule(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_update_database(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_update_database(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_update_database_ddl(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_update_database_ddl(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ transport = DatabaseAdminRestTransport(interceptor=MyCustomDatabaseAdminInterceptor())
+ client = DatabaseAdminClient(transport=transport)
+
+
+ """
+
+ def pre_add_split_points(
+ self,
+ request: spanner_database_admin.AddSplitPointsRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.AddSplitPointsRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for add_split_points
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_add_split_points(
+ self, response: spanner_database_admin.AddSplitPointsResponse
+ ) -> spanner_database_admin.AddSplitPointsResponse:
+ """Post-rpc interceptor for add_split_points
+
+ DEPRECATED. Please use the `post_add_split_points_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_add_split_points` interceptor runs
+ before the `post_add_split_points_with_metadata` interceptor.
+ """
+ return response
+
+ def post_add_split_points_with_metadata(
+ self,
+ response: spanner_database_admin.AddSplitPointsResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.AddSplitPointsResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for add_split_points
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_add_split_points_with_metadata`
+ interceptor in new development instead of the `post_add_split_points` interceptor.
+ When both interceptors are used, this `post_add_split_points_with_metadata` interceptor runs after the
+ `post_add_split_points` interceptor. The (possibly modified) response returned by
+ `post_add_split_points` will be passed to
+ `post_add_split_points_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_copy_backup(
+ self,
+ request: backup.CopyBackupRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[backup.CopyBackupRequest, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Pre-rpc interceptor for copy_backup
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_copy_backup(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for copy_backup
+
+ DEPRECATED. Please use the `post_copy_backup_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_copy_backup` interceptor runs
+ before the `post_copy_backup_with_metadata` interceptor.
+ """
+ return response
+
+ def post_copy_backup_with_metadata(
+ self,
+ response: operations_pb2.Operation,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for copy_backup
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_copy_backup_with_metadata`
+ interceptor in new development instead of the `post_copy_backup` interceptor.
+ When both interceptors are used, this `post_copy_backup_with_metadata` interceptor runs after the
+ `post_copy_backup` interceptor. The (possibly modified) response returned by
+ `post_copy_backup` will be passed to
+ `post_copy_backup_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_create_backup(
+ self,
+ request: gsad_backup.CreateBackupRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ gsad_backup.CreateBackupRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for create_backup
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_create_backup(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for create_backup
+
+ DEPRECATED. Please use the `post_create_backup_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_create_backup` interceptor runs
+ before the `post_create_backup_with_metadata` interceptor.
+ """
+ return response
+
+ def post_create_backup_with_metadata(
+ self,
+ response: operations_pb2.Operation,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for create_backup
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_create_backup_with_metadata`
+ interceptor in new development instead of the `post_create_backup` interceptor.
+ When both interceptors are used, this `post_create_backup_with_metadata` interceptor runs after the
+ `post_create_backup` interceptor. The (possibly modified) response returned by
+ `post_create_backup` will be passed to
+ `post_create_backup_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_create_backup_schedule(
+ self,
+ request: gsad_backup_schedule.CreateBackupScheduleRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ gsad_backup_schedule.CreateBackupScheduleRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for create_backup_schedule
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_create_backup_schedule(
+ self, response: gsad_backup_schedule.BackupSchedule
+ ) -> gsad_backup_schedule.BackupSchedule:
+ """Post-rpc interceptor for create_backup_schedule
+
+ DEPRECATED. Please use the `post_create_backup_schedule_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_create_backup_schedule` interceptor runs
+ before the `post_create_backup_schedule_with_metadata` interceptor.
+ """
+ return response
+
+ def post_create_backup_schedule_with_metadata(
+ self,
+ response: gsad_backup_schedule.BackupSchedule,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ gsad_backup_schedule.BackupSchedule, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Post-rpc interceptor for create_backup_schedule
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_create_backup_schedule_with_metadata`
+ interceptor in new development instead of the `post_create_backup_schedule` interceptor.
+ When both interceptors are used, this `post_create_backup_schedule_with_metadata` interceptor runs after the
+ `post_create_backup_schedule` interceptor. The (possibly modified) response returned by
+ `post_create_backup_schedule` will be passed to
+ `post_create_backup_schedule_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_create_database(
+ self,
+ request: spanner_database_admin.CreateDatabaseRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.CreateDatabaseRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for create_database
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_create_database(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for create_database
+
+ DEPRECATED. Please use the `post_create_database_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_create_database` interceptor runs
+ before the `post_create_database_with_metadata` interceptor.
+ """
+ return response
+
+ def post_create_database_with_metadata(
+ self,
+ response: operations_pb2.Operation,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for create_database
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_create_database_with_metadata`
+ interceptor in new development instead of the `post_create_database` interceptor.
+ When both interceptors are used, this `post_create_database_with_metadata` interceptor runs after the
+ `post_create_database` interceptor. The (possibly modified) response returned by
+ `post_create_database` will be passed to
+ `post_create_database_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_delete_backup(
+ self,
+ request: backup.DeleteBackupRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[backup.DeleteBackupRequest, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Pre-rpc interceptor for delete_backup
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def pre_delete_backup_schedule(
+ self,
+ request: backup_schedule.DeleteBackupScheduleRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ backup_schedule.DeleteBackupScheduleRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for delete_backup_schedule
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def pre_drop_database(
+ self,
+ request: spanner_database_admin.DropDatabaseRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.DropDatabaseRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for drop_database
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def pre_get_backup(
+ self,
+ request: backup.GetBackupRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[backup.GetBackupRequest, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Pre-rpc interceptor for get_backup
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_get_backup(self, response: backup.Backup) -> backup.Backup:
+ """Post-rpc interceptor for get_backup
+
+ DEPRECATED. Please use the `post_get_backup_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_get_backup` interceptor runs
+ before the `post_get_backup_with_metadata` interceptor.
+ """
+ return response
+
+ def post_get_backup_with_metadata(
+ self, response: backup.Backup, metadata: Sequence[Tuple[str, Union[str, bytes]]]
+ ) -> Tuple[backup.Backup, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for get_backup
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_get_backup_with_metadata`
+ interceptor in new development instead of the `post_get_backup` interceptor.
+ When both interceptors are used, this `post_get_backup_with_metadata` interceptor runs after the
+ `post_get_backup` interceptor. The (possibly modified) response returned by
+ `post_get_backup` will be passed to
+ `post_get_backup_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_get_backup_schedule(
+ self,
+ request: backup_schedule.GetBackupScheduleRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ backup_schedule.GetBackupScheduleRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for get_backup_schedule
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_get_backup_schedule(
+ self, response: backup_schedule.BackupSchedule
+ ) -> backup_schedule.BackupSchedule:
+ """Post-rpc interceptor for get_backup_schedule
+
+ DEPRECATED. Please use the `post_get_backup_schedule_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_get_backup_schedule` interceptor runs
+ before the `post_get_backup_schedule_with_metadata` interceptor.
+ """
+ return response
+
+ def post_get_backup_schedule_with_metadata(
+ self,
+ response: backup_schedule.BackupSchedule,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[backup_schedule.BackupSchedule, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for get_backup_schedule
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_get_backup_schedule_with_metadata`
+ interceptor in new development instead of the `post_get_backup_schedule` interceptor.
+ When both interceptors are used, this `post_get_backup_schedule_with_metadata` interceptor runs after the
+ `post_get_backup_schedule` interceptor. The (possibly modified) response returned by
+ `post_get_backup_schedule` will be passed to
+ `post_get_backup_schedule_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_get_database(
+ self,
+ request: spanner_database_admin.GetDatabaseRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.GetDatabaseRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for get_database
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_get_database(
+ self, response: spanner_database_admin.Database
+ ) -> spanner_database_admin.Database:
+ """Post-rpc interceptor for get_database
+
+ DEPRECATED. Please use the `post_get_database_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_get_database` interceptor runs
+ before the `post_get_database_with_metadata` interceptor.
+ """
+ return response
+
+ def post_get_database_with_metadata(
+ self,
+ response: spanner_database_admin.Database,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.Database, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Post-rpc interceptor for get_database
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_get_database_with_metadata`
+ interceptor in new development instead of the `post_get_database` interceptor.
+ When both interceptors are used, this `post_get_database_with_metadata` interceptor runs after the
+ `post_get_database` interceptor. The (possibly modified) response returned by
+ `post_get_database` will be passed to
+ `post_get_database_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_get_database_ddl(
+ self,
+ request: spanner_database_admin.GetDatabaseDdlRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.GetDatabaseDdlRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for get_database_ddl
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_get_database_ddl(
+ self, response: spanner_database_admin.GetDatabaseDdlResponse
+ ) -> spanner_database_admin.GetDatabaseDdlResponse:
+ """Post-rpc interceptor for get_database_ddl
+
+ DEPRECATED. Please use the `post_get_database_ddl_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_get_database_ddl` interceptor runs
+ before the `post_get_database_ddl_with_metadata` interceptor.
+ """
+ return response
+
+ def post_get_database_ddl_with_metadata(
+ self,
+ response: spanner_database_admin.GetDatabaseDdlResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.GetDatabaseDdlResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for get_database_ddl
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_get_database_ddl_with_metadata`
+ interceptor in new development instead of the `post_get_database_ddl` interceptor.
+ When both interceptors are used, this `post_get_database_ddl_with_metadata` interceptor runs after the
+ `post_get_database_ddl` interceptor. The (possibly modified) response returned by
+ `post_get_database_ddl` will be passed to
+ `post_get_database_ddl_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_get_iam_policy(
+ self,
+ request: iam_policy_pb2.GetIamPolicyRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for get_iam_policy
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy:
+ """Post-rpc interceptor for get_iam_policy
+
+ DEPRECATED. Please use the `post_get_iam_policy_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_get_iam_policy` interceptor runs
+ before the `post_get_iam_policy_with_metadata` interceptor.
+ """
+ return response
+
+ def post_get_iam_policy_with_metadata(
+ self,
+ response: policy_pb2.Policy,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for get_iam_policy
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_get_iam_policy_with_metadata`
+ interceptor in new development instead of the `post_get_iam_policy` interceptor.
+ When both interceptors are used, this `post_get_iam_policy_with_metadata` interceptor runs after the
+ `post_get_iam_policy` interceptor. The (possibly modified) response returned by
+ `post_get_iam_policy` will be passed to
+ `post_get_iam_policy_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_list_backup_operations(
+ self,
+ request: backup.ListBackupOperationsRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ backup.ListBackupOperationsRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for list_backup_operations
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_list_backup_operations(
+ self, response: backup.ListBackupOperationsResponse
+ ) -> backup.ListBackupOperationsResponse:
+ """Post-rpc interceptor for list_backup_operations
+
+ DEPRECATED. Please use the `post_list_backup_operations_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_list_backup_operations` interceptor runs
+ before the `post_list_backup_operations_with_metadata` interceptor.
+ """
+ return response
+
+ def post_list_backup_operations_with_metadata(
+ self,
+ response: backup.ListBackupOperationsResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ backup.ListBackupOperationsResponse, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Post-rpc interceptor for list_backup_operations
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_list_backup_operations_with_metadata`
+ interceptor in new development instead of the `post_list_backup_operations` interceptor.
+ When both interceptors are used, this `post_list_backup_operations_with_metadata` interceptor runs after the
+ `post_list_backup_operations` interceptor. The (possibly modified) response returned by
+ `post_list_backup_operations` will be passed to
+ `post_list_backup_operations_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_list_backups(
+ self,
+ request: backup.ListBackupsRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[backup.ListBackupsRequest, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Pre-rpc interceptor for list_backups
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_list_backups(
+ self, response: backup.ListBackupsResponse
+ ) -> backup.ListBackupsResponse:
+ """Post-rpc interceptor for list_backups
+
+ DEPRECATED. Please use the `post_list_backups_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_list_backups` interceptor runs
+ before the `post_list_backups_with_metadata` interceptor.
+ """
+ return response
+
+ def post_list_backups_with_metadata(
+ self,
+ response: backup.ListBackupsResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[backup.ListBackupsResponse, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for list_backups
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_list_backups_with_metadata`
+ interceptor in new development instead of the `post_list_backups` interceptor.
+ When both interceptors are used, this `post_list_backups_with_metadata` interceptor runs after the
+ `post_list_backups` interceptor. The (possibly modified) response returned by
+ `post_list_backups` will be passed to
+ `post_list_backups_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_list_backup_schedules(
+ self,
+ request: backup_schedule.ListBackupSchedulesRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ backup_schedule.ListBackupSchedulesRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for list_backup_schedules
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_list_backup_schedules(
+ self, response: backup_schedule.ListBackupSchedulesResponse
+ ) -> backup_schedule.ListBackupSchedulesResponse:
+ """Post-rpc interceptor for list_backup_schedules
+
+ DEPRECATED. Please use the `post_list_backup_schedules_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_list_backup_schedules` interceptor runs
+ before the `post_list_backup_schedules_with_metadata` interceptor.
+ """
+ return response
+
+ def post_list_backup_schedules_with_metadata(
+ self,
+ response: backup_schedule.ListBackupSchedulesResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ backup_schedule.ListBackupSchedulesResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for list_backup_schedules
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_list_backup_schedules_with_metadata`
+ interceptor in new development instead of the `post_list_backup_schedules` interceptor.
+ When both interceptors are used, this `post_list_backup_schedules_with_metadata` interceptor runs after the
+ `post_list_backup_schedules` interceptor. The (possibly modified) response returned by
+ `post_list_backup_schedules` will be passed to
+ `post_list_backup_schedules_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_list_database_operations(
+ self,
+ request: spanner_database_admin.ListDatabaseOperationsRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.ListDatabaseOperationsRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for list_database_operations
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_list_database_operations(
+ self, response: spanner_database_admin.ListDatabaseOperationsResponse
+ ) -> spanner_database_admin.ListDatabaseOperationsResponse:
+ """Post-rpc interceptor for list_database_operations
+
+ DEPRECATED. Please use the `post_list_database_operations_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_list_database_operations` interceptor runs
+ before the `post_list_database_operations_with_metadata` interceptor.
+ """
+ return response
+
+ def post_list_database_operations_with_metadata(
+ self,
+ response: spanner_database_admin.ListDatabaseOperationsResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.ListDatabaseOperationsResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for list_database_operations
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_list_database_operations_with_metadata`
+ interceptor in new development instead of the `post_list_database_operations` interceptor.
+ When both interceptors are used, this `post_list_database_operations_with_metadata` interceptor runs after the
+ `post_list_database_operations` interceptor. The (possibly modified) response returned by
+ `post_list_database_operations` will be passed to
+ `post_list_database_operations_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_list_database_roles(
+ self,
+ request: spanner_database_admin.ListDatabaseRolesRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.ListDatabaseRolesRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for list_database_roles
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_list_database_roles(
+ self, response: spanner_database_admin.ListDatabaseRolesResponse
+ ) -> spanner_database_admin.ListDatabaseRolesResponse:
+ """Post-rpc interceptor for list_database_roles
+
+ DEPRECATED. Please use the `post_list_database_roles_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_list_database_roles` interceptor runs
+ before the `post_list_database_roles_with_metadata` interceptor.
+ """
+ return response
+
+ def post_list_database_roles_with_metadata(
+ self,
+ response: spanner_database_admin.ListDatabaseRolesResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.ListDatabaseRolesResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for list_database_roles
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_list_database_roles_with_metadata`
+ interceptor in new development instead of the `post_list_database_roles` interceptor.
+ When both interceptors are used, this `post_list_database_roles_with_metadata` interceptor runs after the
+ `post_list_database_roles` interceptor. The (possibly modified) response returned by
+ `post_list_database_roles` will be passed to
+ `post_list_database_roles_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_list_databases(
+ self,
+ request: spanner_database_admin.ListDatabasesRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.ListDatabasesRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for list_databases
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_list_databases(
+ self, response: spanner_database_admin.ListDatabasesResponse
+ ) -> spanner_database_admin.ListDatabasesResponse:
+ """Post-rpc interceptor for list_databases
+
+ DEPRECATED. Please use the `post_list_databases_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_list_databases` interceptor runs
+ before the `post_list_databases_with_metadata` interceptor.
+ """
+ return response
+
+ def post_list_databases_with_metadata(
+ self,
+ response: spanner_database_admin.ListDatabasesResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.ListDatabasesResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for list_databases
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_list_databases_with_metadata`
+ interceptor in new development instead of the `post_list_databases` interceptor.
+ When both interceptors are used, this `post_list_databases_with_metadata` interceptor runs after the
+ `post_list_databases` interceptor. The (possibly modified) response returned by
+ `post_list_databases` will be passed to
+ `post_list_databases_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_restore_database(
+ self,
+ request: spanner_database_admin.RestoreDatabaseRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.RestoreDatabaseRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for restore_database
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_restore_database(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for restore_database
+
+ DEPRECATED. Please use the `post_restore_database_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_restore_database` interceptor runs
+ before the `post_restore_database_with_metadata` interceptor.
+ """
+ return response
+
+ def post_restore_database_with_metadata(
+ self,
+ response: operations_pb2.Operation,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for restore_database
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_restore_database_with_metadata`
+ interceptor in new development instead of the `post_restore_database` interceptor.
+ When both interceptors are used, this `post_restore_database_with_metadata` interceptor runs after the
+ `post_restore_database` interceptor. The (possibly modified) response returned by
+ `post_restore_database` will be passed to
+ `post_restore_database_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_set_iam_policy(
+ self,
+ request: iam_policy_pb2.SetIamPolicyRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for set_iam_policy
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy:
+ """Post-rpc interceptor for set_iam_policy
+
+ DEPRECATED. Please use the `post_set_iam_policy_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_set_iam_policy` interceptor runs
+ before the `post_set_iam_policy_with_metadata` interceptor.
+ """
+ return response
+
+ def post_set_iam_policy_with_metadata(
+ self,
+ response: policy_pb2.Policy,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for set_iam_policy
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_set_iam_policy_with_metadata`
+ interceptor in new development instead of the `post_set_iam_policy` interceptor.
+ When both interceptors are used, this `post_set_iam_policy_with_metadata` interceptor runs after the
+ `post_set_iam_policy` interceptor. The (possibly modified) response returned by
+ `post_set_iam_policy` will be passed to
+ `post_set_iam_policy_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_test_iam_permissions(
+ self,
+ request: iam_policy_pb2.TestIamPermissionsRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ iam_policy_pb2.TestIamPermissionsRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for test_iam_permissions
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_test_iam_permissions(
+ self, response: iam_policy_pb2.TestIamPermissionsResponse
+ ) -> iam_policy_pb2.TestIamPermissionsResponse:
+ """Post-rpc interceptor for test_iam_permissions
+
+ DEPRECATED. Please use the `post_test_iam_permissions_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_test_iam_permissions` interceptor runs
+ before the `post_test_iam_permissions_with_metadata` interceptor.
+ """
+ return response
+
+ def post_test_iam_permissions_with_metadata(
+ self,
+ response: iam_policy_pb2.TestIamPermissionsResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ iam_policy_pb2.TestIamPermissionsResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for test_iam_permissions
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_test_iam_permissions_with_metadata`
+ interceptor in new development instead of the `post_test_iam_permissions` interceptor.
+ When both interceptors are used, this `post_test_iam_permissions_with_metadata` interceptor runs after the
+ `post_test_iam_permissions` interceptor. The (possibly modified) response returned by
+ `post_test_iam_permissions` will be passed to
+ `post_test_iam_permissions_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_update_backup(
+ self,
+ request: gsad_backup.UpdateBackupRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ gsad_backup.UpdateBackupRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for update_backup
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_update_backup(self, response: gsad_backup.Backup) -> gsad_backup.Backup:
+ """Post-rpc interceptor for update_backup
+
+ DEPRECATED. Please use the `post_update_backup_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_update_backup` interceptor runs
+ before the `post_update_backup_with_metadata` interceptor.
+ """
+ return response
+
+ def post_update_backup_with_metadata(
+ self,
+ response: gsad_backup.Backup,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[gsad_backup.Backup, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for update_backup
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_update_backup_with_metadata`
+ interceptor in new development instead of the `post_update_backup` interceptor.
+ When both interceptors are used, this `post_update_backup_with_metadata` interceptor runs after the
+ `post_update_backup` interceptor. The (possibly modified) response returned by
+ `post_update_backup` will be passed to
+ `post_update_backup_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_update_backup_schedule(
+ self,
+ request: gsad_backup_schedule.UpdateBackupScheduleRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ gsad_backup_schedule.UpdateBackupScheduleRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for update_backup_schedule
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_update_backup_schedule(
+ self, response: gsad_backup_schedule.BackupSchedule
+ ) -> gsad_backup_schedule.BackupSchedule:
+ """Post-rpc interceptor for update_backup_schedule
+
+ DEPRECATED. Please use the `post_update_backup_schedule_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_update_backup_schedule` interceptor runs
+ before the `post_update_backup_schedule_with_metadata` interceptor.
+ """
+ return response
+
+ def post_update_backup_schedule_with_metadata(
+ self,
+ response: gsad_backup_schedule.BackupSchedule,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ gsad_backup_schedule.BackupSchedule, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Post-rpc interceptor for update_backup_schedule
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_update_backup_schedule_with_metadata`
+ interceptor in new development instead of the `post_update_backup_schedule` interceptor.
+ When both interceptors are used, this `post_update_backup_schedule_with_metadata` interceptor runs after the
+ `post_update_backup_schedule` interceptor. The (possibly modified) response returned by
+ `post_update_backup_schedule` will be passed to
+ `post_update_backup_schedule_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_update_database(
+ self,
+ request: spanner_database_admin.UpdateDatabaseRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.UpdateDatabaseRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for update_database
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_update_database(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for update_database
+
+ DEPRECATED. Please use the `post_update_database_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_update_database` interceptor runs
+ before the `post_update_database_with_metadata` interceptor.
+ """
+ return response
+
+ def post_update_database_with_metadata(
+ self,
+ response: operations_pb2.Operation,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for update_database
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_update_database_with_metadata`
+ interceptor in new development instead of the `post_update_database` interceptor.
+ When both interceptors are used, this `post_update_database_with_metadata` interceptor runs after the
+ `post_update_database` interceptor. The (possibly modified) response returned by
+ `post_update_database` will be passed to
+ `post_update_database_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_update_database_ddl(
+ self,
+ request: spanner_database_admin.UpdateDatabaseDdlRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.UpdateDatabaseDdlRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for update_database_ddl
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_update_database_ddl(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for update_database_ddl
+
+ DEPRECATED. Please use the `post_update_database_ddl_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_update_database_ddl` interceptor runs
+ before the `post_update_database_ddl_with_metadata` interceptor.
+ """
+ return response
+
+ def post_update_database_ddl_with_metadata(
+ self,
+ response: operations_pb2.Operation,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for update_database_ddl
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_update_database_ddl_with_metadata`
+ interceptor in new development instead of the `post_update_database_ddl` interceptor.
+ When both interceptors are used, this `post_update_database_ddl_with_metadata` interceptor runs after the
+ `post_update_database_ddl` interceptor. The (possibly modified) response returned by
+ `post_update_database_ddl` will be passed to
+ `post_update_database_ddl_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_cancel_operation(
+ self,
+ request: operations_pb2.CancelOperationRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ operations_pb2.CancelOperationRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for cancel_operation
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_cancel_operation(self, response: None) -> None:
+ """Post-rpc interceptor for cancel_operation
+
+ Override in a subclass to manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code.
+ """
+ return response
+
+ def pre_delete_operation(
+ self,
+ request: operations_pb2.DeleteOperationRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for delete_operation
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_delete_operation(self, response: None) -> None:
+ """Post-rpc interceptor for delete_operation
+
+ Override in a subclass to manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code.
+ """
+ return response
+
+ def pre_get_operation(
+ self,
+ request: operations_pb2.GetOperationRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ operations_pb2.GetOperationRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for get_operation
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_get_operation(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for get_operation
+
+ Override in a subclass to manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code.
+ """
+ return response
+
+ def pre_list_operations(
+ self,
+ request: operations_pb2.ListOperationsRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ operations_pb2.ListOperationsRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for list_operations
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_list_operations(
+ self, response: operations_pb2.ListOperationsResponse
+ ) -> operations_pb2.ListOperationsResponse:
+ """Post-rpc interceptor for list_operations
+
+ Override in a subclass to manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code.
+ """
+ return response
+
+
+@dataclasses.dataclass
+class DatabaseAdminRestStub:
+ """State shared by the transport's per-method REST stubs.
+
+ Each nested ``_<MethodName>`` class on ``DatabaseAdminRestTransport``
+ mixes this in to access the HTTP session, host, and interceptor.
+ """
+
+ # Authorized HTTP session used to send requests.
+ _session: AuthorizedSession
+ # Host/URL prefix prepended to each transcoded request URI.
+ _host: str
+ # Pre-/post-RPC hooks invoked around every call.
+ _interceptor: DatabaseAdminRestInterceptor
+
+
+class DatabaseAdminRestTransport(_BaseDatabaseAdminRestTransport):
+ """REST backend synchronous transport for DatabaseAdmin.
+
+ Cloud Spanner Database Admin API
+
+ The Cloud Spanner Database Admin API can be used to:
+
+ - create, drop, and list databases
+ - update the schema of pre-existing databases
+ - create, delete, copy and list backups for a database
+ - restore a database from an existing backup
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends JSON representations of protocol buffers over HTTP/1.1
+ """
+
+ def __init__(
+ self,
+ *,
+ host: str = "spanner.googleapis.com",
+ credentials: Optional[ga_credentials.Credentials] = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ quota_project_id: Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
+ url_scheme: str = "https",
+ interceptor: Optional[DatabaseAdminRestInterceptor] = None,
+ api_audience: Optional[str] = None,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]):
+ The hostname to connect to (default: 'spanner.googleapis.com').
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+ scopes (Optional(Sequence[str])): A list of scopes. This argument is
+ ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
+ certificate to configure mutual TLS HTTP channel. It is ignored
+ if ``channel`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you are developing
+ your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
+ url_scheme: the protocol scheme for the API endpoint. Normally
+ "https", but for testing or local servers,
+ "http" can be specified.
+ interceptor (Optional[DatabaseAdminRestInterceptor]): Hooks run
+ before and after each RPC; a no-op interceptor is installed
+ when omitted.
+ api_audience (Optional[str]): The intended audience of the
+ credentials, forwarded to the base transport.
+ """
+ # Run the base constructor
+ # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
+ # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+ # credentials object
+ # NOTE(review): ``credentials_file``, ``scopes`` and ``quota_project_id``
+ # are accepted above but not forwarded to the base constructor here.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
+ url_scheme=url_scheme,
+ api_audience=api_audience,
+ )
+ self._session = AuthorizedSession(
+ self._credentials, default_host=self.DEFAULT_HOST
+ )
+ # Created lazily by the ``operations_client`` property.
+ self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None
+ if client_cert_source_for_mtls:
+ self._session.configure_mtls_channel(client_cert_source_for_mtls)
+ # Fall back to a no-op interceptor so hook call sites stay unconditional.
+ self._interceptor = interceptor or DatabaseAdminRestInterceptor()
+ self._prep_wrapped_messages(client_info)
+
+ @property
+ def operations_client(self) -> operations_v1.AbstractOperationsClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ # REST bindings for the google.longrunning.Operations mixin:
+ # one URI per resource type that can own operations
+ # (databases, instances, backups, instance configs).
+ http_options: Dict[str, List[Dict[str, str]]] = {
+ "google.longrunning.Operations.CancelOperation": [
+ {
+ "method": "post",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/operations/*}:cancel",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{name=projects/*/instances/*/operations/*}:cancel",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*/operations/*}:cancel",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{name=projects/*/instanceConfigs/*/operations/*}:cancel",
+ },
+ ],
+ "google.longrunning.Operations.DeleteOperation": [
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/operations/*}",
+ },
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instances/*/operations/*}",
+ },
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*/operations/*}",
+ },
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instanceConfigs/*/operations/*}",
+ },
+ ],
+ "google.longrunning.Operations.GetOperation": [
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/operations/*}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/operations/*}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*/operations/*}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instanceConfigs/*/operations/*}",
+ },
+ ],
+ "google.longrunning.Operations.ListOperations": [
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/operations}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/operations}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*/operations}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instanceConfigs/*/operations}",
+ },
+ ],
+ }
+
+ rest_transport = operations_v1.OperationsRestTransport(
+ host=self._host,
+ # use the credentials which are saved
+ credentials=self._credentials,
+ scopes=self._scopes,
+ http_options=http_options,
+ path_prefix="v1",
+ )
+
+ self._operations_client = operations_v1.AbstractOperationsClient(
+ transport=rest_transport
+ )
+
+ # Return the client from cache.
+ return self._operations_client
+
+ class _AddSplitPoints(
+ _BaseDatabaseAdminRestTransport._BaseAddSplitPoints, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.AddSplitPoints")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.AddSplitPointsRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.AddSplitPointsResponse:
+ r"""Call the add split points method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.AddSplitPointsRequest):
+ The request object. The request for
+ [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.spanner_database_admin.AddSplitPointsResponse:
+ The response for
+ [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseAddSplitPoints._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_add_split_points(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseAddSplitPoints._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseAddSplitPoints._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseAddSplitPoints._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.AddSplitPoints",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "AddSplitPoints",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._AddSplitPoints._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = spanner_database_admin.AddSplitPointsResponse()
+ pb_resp = spanner_database_admin.AddSplitPointsResponse.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_add_split_points(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_add_split_points_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = (
+ spanner_database_admin.AddSplitPointsResponse.to_json(response)
+ )
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.add_split_points",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "AddSplitPoints",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _CopyBackup(
+ _BaseDatabaseAdminRestTransport._BaseCopyBackup, DatabaseAdminRestStub
+ ):
+ """REST stub for the CopyBackup method (returns a long-running Operation)."""
+
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.CopyBackup")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ # Issue the transcoded HTTP request over the authorized session.
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: backup.CopyBackupRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.Operation:
+ r"""Call the copy backup method over HTTP.
+
+ Args:
+ request (~.backup.CopyBackupRequest):
+ The request object. The request for
+ [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.operations_pb2.Operation:
+ This resource represents a
+ long-running operation that is the
+ result of a network API call.
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseCopyBackup._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_copy_backup(request, metadata)
+ transcoded_request = (
+ _BaseDatabaseAdminRestTransport._BaseCopyBackup._get_transcoded_request(
+ http_options, request
+ )
+ )
+
+ body = (
+ _BaseDatabaseAdminRestTransport._BaseCopyBackup._get_request_body_json(
+ transcoded_request
+ )
+ )
+
+ # Jsonify the query params
+ query_params = (
+ _BaseDatabaseAdminRestTransport._BaseCopyBackup._get_query_params_json(
+ transcoded_request
+ )
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ # Best-effort logging only; serialization failures fall back
+ # to a None payload rather than breaking the RPC.
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.CopyBackup",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "CopyBackup",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._CopyBackup._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = operations_pb2.Operation()
+ json_format.Parse(response.content, resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_copy_backup(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_copy_backup_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ # Best-effort logging of the parsed Operation.
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.copy_backup",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "CopyBackup",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _CreateBackup(
+ _BaseDatabaseAdminRestTransport._BaseCreateBackup, DatabaseAdminRestStub
+ ):
+ """REST stub for the CreateBackup method (returns a long-running Operation)."""
+
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.CreateBackup")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ # Issue the transcoded HTTP request over the authorized session.
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: gsad_backup.CreateBackupRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.Operation:
+ r"""Call the create backup method over HTTP.
+
+ Args:
+ request (~.gsad_backup.CreateBackupRequest):
+ The request object. The request for
+ [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.operations_pb2.Operation:
+ This resource represents a
+ long-running operation that is the
+ result of a network API call.
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseCreateBackup._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_create_backup(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseCreateBackup._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseCreateBackup._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseCreateBackup._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ # Best-effort logging only; serialization failures fall back
+ # to a None payload rather than breaking the RPC.
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.CreateBackup",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "CreateBackup",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._CreateBackup._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = operations_pb2.Operation()
+ json_format.Parse(response.content, resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_create_backup(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_create_backup_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ # Best-effort logging of the parsed Operation.
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.create_backup",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "CreateBackup",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _CreateBackupSchedule(
+ _BaseDatabaseAdminRestTransport._BaseCreateBackupSchedule, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.CreateBackupSchedule")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: gsad_backup_schedule.CreateBackupScheduleRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> gsad_backup_schedule.BackupSchedule:
+ r"""Call the create backup schedule method over HTTP.
+
+ Args:
+ request (~.gsad_backup_schedule.CreateBackupScheduleRequest):
+ The request object. The request for
+ [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.gsad_backup_schedule.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseCreateBackupSchedule._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_create_backup_schedule(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseCreateBackupSchedule._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseCreateBackupSchedule._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseCreateBackupSchedule._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.CreateBackupSchedule",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "CreateBackupSchedule",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._CreateBackupSchedule._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = gsad_backup_schedule.BackupSchedule()
+ pb_resp = gsad_backup_schedule.BackupSchedule.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_create_backup_schedule(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_create_backup_schedule_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = gsad_backup_schedule.BackupSchedule.to_json(
+ response
+ )
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.create_backup_schedule",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "CreateBackupSchedule",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _CreateDatabase(
+ _BaseDatabaseAdminRestTransport._BaseCreateDatabase, DatabaseAdminRestStub
+ ):
+ """REST stub for the CreateDatabase method (returns a long-running Operation)."""
+
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.CreateDatabase")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ # Issue the transcoded HTTP request over the authorized session.
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.CreateDatabaseRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.Operation:
+ r"""Call the create database method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.CreateDatabaseRequest):
+ The request object. The request for
+ [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.operations_pb2.Operation:
+ This resource represents a
+ long-running operation that is the
+ result of a network API call.
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseCreateDatabase._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_create_database(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseCreateDatabase._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseCreateDatabase._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseCreateDatabase._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ # Best-effort logging only; serialization failures fall back
+ # to a None payload rather than breaking the RPC.
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.CreateDatabase",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "CreateDatabase",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._CreateDatabase._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = operations_pb2.Operation()
+ json_format.Parse(response.content, resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_create_database(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_create_database_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ # Best-effort logging of the parsed Operation.
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.create_database",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "CreateDatabase",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _DeleteBackup(
+ _BaseDatabaseAdminRestTransport._BaseDeleteBackup, DatabaseAdminRestStub
+ ):
+ """REST stub for the DeleteBackup method (empty response)."""
+
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.DeleteBackup")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ # Issue the transcoded HTTP request over the authorized session.
+ # Note: ``body`` is accepted for signature parity with the other
+ # stubs but is not sent — DeleteBackup has no request body.
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: backup.DeleteBackupRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ):
+ r"""Call the delete backup method over HTTP.
+
+ Args:
+ request (~.backup.DeleteBackupRequest):
+ The request object. The request for
+ [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseDeleteBackup._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_delete_backup(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseDeleteBackup._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseDeleteBackup._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ # Best-effort logging only; serialization failures fall back
+ # to a None payload rather than breaking the RPC.
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.DeleteBackup",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "DeleteBackup",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._DeleteBackup._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ # On success there is nothing to return: DeleteBackup's response
+ # message is empty, so the method implicitly returns None.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ class _DeleteBackupSchedule(
+ _BaseDatabaseAdminRestTransport._BaseDeleteBackupSchedule, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.DeleteBackupSchedule")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: backup_schedule.DeleteBackupScheduleRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ):
+ r"""Call the delete backup schedule method over HTTP.
+
+ Args:
+ request (~.backup_schedule.DeleteBackupScheduleRequest):
+ The request object. The request for
+ [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseDeleteBackupSchedule._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_delete_backup_schedule(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseDeleteBackupSchedule._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseDeleteBackupSchedule._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.DeleteBackupSchedule",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "DeleteBackupSchedule",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._DeleteBackupSchedule._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ class _DropDatabase(
+ _BaseDatabaseAdminRestTransport._BaseDropDatabase, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.DropDatabase")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.DropDatabaseRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ):
+ r"""Call the drop database method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.DropDatabaseRequest):
+ The request object. The request for
+ [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseDropDatabase._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_drop_database(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseDropDatabase._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseDropDatabase._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.DropDatabase",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "DropDatabase",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._DropDatabase._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ class _GetBackup(
+ _BaseDatabaseAdminRestTransport._BaseGetBackup, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.GetBackup")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: backup.GetBackupRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> backup.Backup:
+ r"""Call the get backup method over HTTP.
+
+ Args:
+ request (~.backup.GetBackupRequest):
+ The request object. The request for
+ [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.backup.Backup:
+ A backup of a Cloud Spanner database.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseGetBackup._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_get_backup(request, metadata)
+ transcoded_request = (
+ _BaseDatabaseAdminRestTransport._BaseGetBackup._get_transcoded_request(
+ http_options, request
+ )
+ )
+
+ # Jsonify the query params
+ query_params = (
+ _BaseDatabaseAdminRestTransport._BaseGetBackup._get_query_params_json(
+ transcoded_request
+ )
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetBackup",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetBackup",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._GetBackup._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = backup.Backup()
+ pb_resp = backup.Backup.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_get_backup(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_get_backup_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = backup.Backup.to_json(response)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.get_backup",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetBackup",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _GetBackupSchedule(
+ _BaseDatabaseAdminRestTransport._BaseGetBackupSchedule, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.GetBackupSchedule")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: backup_schedule.GetBackupScheduleRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> backup_schedule.BackupSchedule:
+ r"""Call the get backup schedule method over HTTP.
+
+ Args:
+ request (~.backup_schedule.GetBackupScheduleRequest):
+ The request object. The request for
+ [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.backup_schedule.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseGetBackupSchedule._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_get_backup_schedule(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseGetBackupSchedule._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseGetBackupSchedule._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetBackupSchedule",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetBackupSchedule",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._GetBackupSchedule._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = backup_schedule.BackupSchedule()
+ pb_resp = backup_schedule.BackupSchedule.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_get_backup_schedule(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_get_backup_schedule_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = backup_schedule.BackupSchedule.to_json(response)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.get_backup_schedule",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetBackupSchedule",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _GetDatabase(
+ _BaseDatabaseAdminRestTransport._BaseGetDatabase, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.GetDatabase")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.GetDatabaseRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.Database:
+ r"""Call the get database method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.GetDatabaseRequest):
+ The request object. The request for
+ [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.spanner_database_admin.Database:
+ A Cloud Spanner database.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseGetDatabase._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_get_database(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseGetDatabase._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = (
+ _BaseDatabaseAdminRestTransport._BaseGetDatabase._get_query_params_json(
+ transcoded_request
+ )
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetDatabase",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetDatabase",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._GetDatabase._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = spanner_database_admin.Database()
+ pb_resp = spanner_database_admin.Database.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_get_database(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_get_database_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = spanner_database_admin.Database.to_json(response)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.get_database",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetDatabase",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _GetDatabaseDdl(
+ _BaseDatabaseAdminRestTransport._BaseGetDatabaseDdl, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.GetDatabaseDdl")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.GetDatabaseDdlRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.GetDatabaseDdlResponse:
+ r"""Call the get database ddl method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.GetDatabaseDdlRequest):
+ The request object. The request for
+ [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.spanner_database_admin.GetDatabaseDdlResponse:
+ The response for
+ [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseGetDatabaseDdl._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_get_database_ddl(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseGetDatabaseDdl._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseGetDatabaseDdl._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetDatabaseDdl",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetDatabaseDdl",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._GetDatabaseDdl._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = spanner_database_admin.GetDatabaseDdlResponse()
+ pb_resp = spanner_database_admin.GetDatabaseDdlResponse.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_get_database_ddl(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_get_database_ddl_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = (
+ spanner_database_admin.GetDatabaseDdlResponse.to_json(response)
+ )
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.get_database_ddl",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetDatabaseDdl",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _GetIamPolicy(
+ _BaseDatabaseAdminRestTransport._BaseGetIamPolicy, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.GetIamPolicy")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: iam_policy_pb2.GetIamPolicyRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> policy_pb2.Policy:
+ r"""Call the get iam policy method over HTTP.
+
+ Args:
+ request (~.iam_policy_pb2.GetIamPolicyRequest):
+ The request object. Request message for ``GetIamPolicy`` method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.policy_pb2.Policy:
+ An Identity and Access Management (IAM) policy, which
+ specifies access controls for Google Cloud resources.
+
+ A ``Policy`` is a collection of ``bindings``. A
+ ``binding`` binds one or more ``members``, or
+ principals, to a single ``role``. Principals can be user
+ accounts, service accounts, Google groups, and domains
+ (such as G Suite). A ``role`` is a named list of
+ permissions; each ``role`` can be an IAM predefined role
+ or a user-created custom role.
+
+ For some types of Google Cloud resources, a ``binding``
+ can also specify a ``condition``, which is a logical
+ expression that allows access to a resource only if the
+ expression evaluates to ``true``. A condition can add
+ constraints based on attributes of the request, the
+ resource, or both. To learn which resources support
+ conditions in their IAM policies, see the `IAM
+ documentation `__.
+
+ **JSON example:**
+
+ ::
+
+ {
+ "bindings": [
+ {
+ "role": "roles/resourcemanager.organizationAdmin",
+ "members": [
+ "user:mike@example.com",
+ "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ]
+ },
+ {
+ "role": "roles/resourcemanager.organizationViewer",
+ "members": [
+ "user:eve@example.com"
+ ],
+ "condition": {
+ "title": "expirable access",
+ "description": "Does not grant access after Sep 2020",
+ "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')",
+ }
+ }
+ ],
+ "etag": "BwWWja0YfJA=",
+ "version": 3
+ }
+
+ **YAML example:**
+
+ ::
+
+ bindings:
+ - members:
+ - user:mike@example.com
+ - group:admins@example.com
+ - domain:google.com
+ - serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin
+ - members:
+ - user:eve@example.com
+ role: roles/resourcemanager.organizationViewer
+ condition:
+ title: expirable access
+ description: Does not grant access after Sep 2020
+ expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+ etag: BwWWja0YfJA=
+ version: 3
+
+ For a description of IAM and its features, see the `IAM
+ documentation `__.
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseGetIamPolicy._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_get_iam_policy(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseGetIamPolicy._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseGetIamPolicy._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseGetIamPolicy._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetIamPolicy",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetIamPolicy",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._GetIamPolicy._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = policy_pb2.Policy()
+ pb_resp = resp
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_get_iam_policy(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_get_iam_policy_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.get_iam_policy",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetIamPolicy",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _InternalUpdateGraphOperation(
+ _BaseDatabaseAdminRestTransport._BaseInternalUpdateGraphOperation,
+ DatabaseAdminRestStub,
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.InternalUpdateGraphOperation")
+
+ def __call__(
+ self,
+ request: spanner_database_admin.InternalUpdateGraphOperationRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.InternalUpdateGraphOperationResponse:
+ raise NotImplementedError(
+ "Method InternalUpdateGraphOperation is not available over REST transport"
+ )
+
+    class _ListBackupOperations(
+        _BaseDatabaseAdminRestTransport._BaseListBackupOperations, DatabaseAdminRestStub
+    ):
+        def __hash__(self):
+            return hash("DatabaseAdminRestTransport.ListBackupOperations")
+
+        @staticmethod
+        def _get_response(
+            host,
+            metadata,
+            query_params,
+            session,
+            timeout,
+            transcoded_request,
+            body=None,
+        ):
+            uri = transcoded_request["uri"]
+            method = transcoded_request["method"]
+            headers = dict(metadata)
+            headers["Content-Type"] = "application/json"
+            response = getattr(session, method)(
+                "{host}{uri}".format(host=host, uri=uri),
+                timeout=timeout,
+                headers=headers,
+                params=rest_helpers.flatten_query_params(query_params, strict=True),
+            )
+            return response
+
+        def __call__(
+            self,
+            request: backup.ListBackupOperationsRequest,
+            *,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Optional[float] = None,
+            metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+        ) -> backup.ListBackupOperationsResponse:
+            r"""Call the list backup operations method over HTTP.
+
+            Args:
+                request (~.backup.ListBackupOperationsRequest):
+                    The request object. The request for
+                    [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
+                retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                    should be retried.
+                timeout (float): The timeout for this request.
+                metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                    sent along with the request as metadata. Normally, each value must be of type `str`,
+                    but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                    be of type `bytes`.
+
+            Returns:
+                ~.backup.ListBackupOperationsResponse:
+                    The response for
+                    [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
+
+            """
+
+            http_options = (
+                _BaseDatabaseAdminRestTransport._BaseListBackupOperations._get_http_options()
+            )
+
+            request, metadata = self._interceptor.pre_list_backup_operations(
+                request, metadata
+            )
+            transcoded_request = _BaseDatabaseAdminRestTransport._BaseListBackupOperations._get_transcoded_request(
+                http_options, request
+            )
+
+            # Jsonify the query params
+            query_params = _BaseDatabaseAdminRestTransport._BaseListBackupOperations._get_query_params_json(
+                transcoded_request
+            )
+
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                request_url = "{host}{uri}".format(
+                    host=self._host, uri=transcoded_request["uri"]
+                )
+                method = transcoded_request["method"]
+                try:
+                    request_payload = type(request).to_json(request)
+                except Exception:  # logging is best-effort; never fail the RPC here
+                    request_payload = None
+                http_request = {
+                    "payload": request_payload,
+                    "requestMethod": method,
+                    "requestUrl": request_url,
+                    "headers": dict(metadata),
+                }
+                _LOGGER.debug(
+                    "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListBackupOperations",
+                    extra={
+                        "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+                        "rpcName": "ListBackupOperations",
+                        "httpRequest": http_request,
+                        "metadata": http_request["headers"],
+                    },
+                )
+
+            # Send the request
+            response = DatabaseAdminRestTransport._ListBackupOperations._get_response(
+                self._host,
+                metadata,
+                query_params,
+                self._session,
+                timeout,
+                transcoded_request,
+            )
+
+            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+            # subclass.
+            if response.status_code >= 400:
+                raise core_exceptions.from_http_response(response)
+
+            # Return the response
+            resp = backup.ListBackupOperationsResponse()
+            pb_resp = backup.ListBackupOperationsResponse.pb(resp)
+
+            json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+            resp = self._interceptor.post_list_backup_operations(resp)
+            response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+            resp, _ = self._interceptor.post_list_backup_operations_with_metadata(
+                resp, response_metadata
+            )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                try:
+                    response_payload = backup.ListBackupOperationsResponse.to_json(
+                        response
+                    )
+                except Exception:  # logging is best-effort; never fail the RPC here
+                    response_payload = None
+                http_response = {
+                    "payload": response_payload,
+                    "headers": dict(response.headers),
+                    "status": response.status_code,
+                }
+                _LOGGER.debug(
+                    "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_backup_operations",
+                    extra={
+                        "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+                        "rpcName": "ListBackupOperations",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
+            return resp
+
+    class _ListBackups(
+        _BaseDatabaseAdminRestTransport._BaseListBackups, DatabaseAdminRestStub
+    ):
+        def __hash__(self):
+            return hash("DatabaseAdminRestTransport.ListBackups")
+
+        @staticmethod
+        def _get_response(
+            host,
+            metadata,
+            query_params,
+            session,
+            timeout,
+            transcoded_request,
+            body=None,
+        ):
+            uri = transcoded_request["uri"]
+            method = transcoded_request["method"]
+            headers = dict(metadata)
+            headers["Content-Type"] = "application/json"
+            response = getattr(session, method)(
+                "{host}{uri}".format(host=host, uri=uri),
+                timeout=timeout,
+                headers=headers,
+                params=rest_helpers.flatten_query_params(query_params, strict=True),
+            )
+            return response
+
+        def __call__(
+            self,
+            request: backup.ListBackupsRequest,
+            *,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Optional[float] = None,
+            metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+        ) -> backup.ListBackupsResponse:
+            r"""Call the list backups method over HTTP.
+
+            Args:
+                request (~.backup.ListBackupsRequest):
+                    The request object. The request for
+                    [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
+                retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                    should be retried.
+                timeout (float): The timeout for this request.
+                metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                    sent along with the request as metadata. Normally, each value must be of type `str`,
+                    but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                    be of type `bytes`.
+
+            Returns:
+                ~.backup.ListBackupsResponse:
+                    The response for
+                    [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
+
+            """
+
+            http_options = (
+                _BaseDatabaseAdminRestTransport._BaseListBackups._get_http_options()
+            )
+
+            request, metadata = self._interceptor.pre_list_backups(request, metadata)
+            transcoded_request = _BaseDatabaseAdminRestTransport._BaseListBackups._get_transcoded_request(
+                http_options, request
+            )
+
+            # Jsonify the query params
+            query_params = (
+                _BaseDatabaseAdminRestTransport._BaseListBackups._get_query_params_json(
+                    transcoded_request
+                )
+            )
+
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                request_url = "{host}{uri}".format(
+                    host=self._host, uri=transcoded_request["uri"]
+                )
+                method = transcoded_request["method"]
+                try:
+                    request_payload = type(request).to_json(request)
+                except Exception:  # logging is best-effort; never fail the RPC here
+                    request_payload = None
+                http_request = {
+                    "payload": request_payload,
+                    "requestMethod": method,
+                    "requestUrl": request_url,
+                    "headers": dict(metadata),
+                }
+                _LOGGER.debug(
+                    "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListBackups",
+                    extra={
+                        "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+                        "rpcName": "ListBackups",
+                        "httpRequest": http_request,
+                        "metadata": http_request["headers"],
+                    },
+                )
+
+            # Send the request
+            response = DatabaseAdminRestTransport._ListBackups._get_response(
+                self._host,
+                metadata,
+                query_params,
+                self._session,
+                timeout,
+                transcoded_request,
+            )
+
+            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+            # subclass.
+            if response.status_code >= 400:
+                raise core_exceptions.from_http_response(response)
+
+            # Return the response
+            resp = backup.ListBackupsResponse()
+            pb_resp = backup.ListBackupsResponse.pb(resp)
+
+            json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+            resp = self._interceptor.post_list_backups(resp)
+            response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+            resp, _ = self._interceptor.post_list_backups_with_metadata(
+                resp, response_metadata
+            )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                try:
+                    response_payload = backup.ListBackupsResponse.to_json(response)
+                except Exception:  # logging is best-effort; never fail the RPC here
+                    response_payload = None
+                http_response = {
+                    "payload": response_payload,
+                    "headers": dict(response.headers),
+                    "status": response.status_code,
+                }
+                _LOGGER.debug(
+                    "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_backups",
+                    extra={
+                        "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+                        "rpcName": "ListBackups",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
+            return resp
+
+    class _ListBackupSchedules(
+        _BaseDatabaseAdminRestTransport._BaseListBackupSchedules, DatabaseAdminRestStub
+    ):
+        def __hash__(self):
+            return hash("DatabaseAdminRestTransport.ListBackupSchedules")
+
+        @staticmethod
+        def _get_response(
+            host,
+            metadata,
+            query_params,
+            session,
+            timeout,
+            transcoded_request,
+            body=None,
+        ):
+            uri = transcoded_request["uri"]
+            method = transcoded_request["method"]
+            headers = dict(metadata)
+            headers["Content-Type"] = "application/json"
+            response = getattr(session, method)(
+                "{host}{uri}".format(host=host, uri=uri),
+                timeout=timeout,
+                headers=headers,
+                params=rest_helpers.flatten_query_params(query_params, strict=True),
+            )
+            return response
+
+        def __call__(
+            self,
+            request: backup_schedule.ListBackupSchedulesRequest,
+            *,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Optional[float] = None,
+            metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+        ) -> backup_schedule.ListBackupSchedulesResponse:
+            r"""Call the list backup schedules method over HTTP.
+
+            Args:
+                request (~.backup_schedule.ListBackupSchedulesRequest):
+                    The request object. The request for
+                    [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+                retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                    should be retried.
+                timeout (float): The timeout for this request.
+                metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                    sent along with the request as metadata. Normally, each value must be of type `str`,
+                    but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                    be of type `bytes`.
+
+            Returns:
+                ~.backup_schedule.ListBackupSchedulesResponse:
+                    The response for
+                    [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+
+            """
+
+            http_options = (
+                _BaseDatabaseAdminRestTransport._BaseListBackupSchedules._get_http_options()
+            )
+
+            request, metadata = self._interceptor.pre_list_backup_schedules(
+                request, metadata
+            )
+            transcoded_request = _BaseDatabaseAdminRestTransport._BaseListBackupSchedules._get_transcoded_request(
+                http_options, request
+            )
+
+            # Jsonify the query params
+            query_params = _BaseDatabaseAdminRestTransport._BaseListBackupSchedules._get_query_params_json(
+                transcoded_request
+            )
+
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                request_url = "{host}{uri}".format(
+                    host=self._host, uri=transcoded_request["uri"]
+                )
+                method = transcoded_request["method"]
+                try:
+                    request_payload = type(request).to_json(request)
+                except Exception:  # logging is best-effort; never fail the RPC here
+                    request_payload = None
+                http_request = {
+                    "payload": request_payload,
+                    "requestMethod": method,
+                    "requestUrl": request_url,
+                    "headers": dict(metadata),
+                }
+                _LOGGER.debug(
+                    "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListBackupSchedules",
+                    extra={
+                        "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+                        "rpcName": "ListBackupSchedules",
+                        "httpRequest": http_request,
+                        "metadata": http_request["headers"],
+                    },
+                )
+
+            # Send the request
+            response = DatabaseAdminRestTransport._ListBackupSchedules._get_response(
+                self._host,
+                metadata,
+                query_params,
+                self._session,
+                timeout,
+                transcoded_request,
+            )
+
+            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+            # subclass.
+            if response.status_code >= 400:
+                raise core_exceptions.from_http_response(response)
+
+            # Return the response
+            resp = backup_schedule.ListBackupSchedulesResponse()
+            pb_resp = backup_schedule.ListBackupSchedulesResponse.pb(resp)
+
+            json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+            resp = self._interceptor.post_list_backup_schedules(resp)
+            response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+            resp, _ = self._interceptor.post_list_backup_schedules_with_metadata(
+                resp, response_metadata
+            )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                try:
+                    response_payload = (
+                        backup_schedule.ListBackupSchedulesResponse.to_json(response)
+                    )
+                except Exception:  # logging is best-effort; never fail the RPC here
+                    response_payload = None
+                http_response = {
+                    "payload": response_payload,
+                    "headers": dict(response.headers),
+                    "status": response.status_code,
+                }
+                _LOGGER.debug(
+                    "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_backup_schedules",
+                    extra={
+                        "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+                        "rpcName": "ListBackupSchedules",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
+            return resp
+
+    class _ListDatabaseOperations(
+        _BaseDatabaseAdminRestTransport._BaseListDatabaseOperations,
+        DatabaseAdminRestStub,
+    ):
+        def __hash__(self):
+            return hash("DatabaseAdminRestTransport.ListDatabaseOperations")
+
+        @staticmethod
+        def _get_response(
+            host,
+            metadata,
+            query_params,
+            session,
+            timeout,
+            transcoded_request,
+            body=None,
+        ):
+            uri = transcoded_request["uri"]
+            method = transcoded_request["method"]
+            headers = dict(metadata)
+            headers["Content-Type"] = "application/json"
+            response = getattr(session, method)(
+                "{host}{uri}".format(host=host, uri=uri),
+                timeout=timeout,
+                headers=headers,
+                params=rest_helpers.flatten_query_params(query_params, strict=True),
+            )
+            return response
+
+        def __call__(
+            self,
+            request: spanner_database_admin.ListDatabaseOperationsRequest,
+            *,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Optional[float] = None,
+            metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+        ) -> spanner_database_admin.ListDatabaseOperationsResponse:
+            r"""Call the list database operations method over HTTP.
+
+            Args:
+                request (~.spanner_database_admin.ListDatabaseOperationsRequest):
+                    The request object. The request for
+                    [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
+                retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                    should be retried.
+                timeout (float): The timeout for this request.
+                metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                    sent along with the request as metadata. Normally, each value must be of type `str`,
+                    but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                    be of type `bytes`.
+
+            Returns:
+                ~.spanner_database_admin.ListDatabaseOperationsResponse:
+                    The response for
+                    [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
+
+            """
+
+            http_options = (
+                _BaseDatabaseAdminRestTransport._BaseListDatabaseOperations._get_http_options()
+            )
+
+            request, metadata = self._interceptor.pre_list_database_operations(
+                request, metadata
+            )
+            transcoded_request = _BaseDatabaseAdminRestTransport._BaseListDatabaseOperations._get_transcoded_request(
+                http_options, request
+            )
+
+            # Jsonify the query params
+            query_params = _BaseDatabaseAdminRestTransport._BaseListDatabaseOperations._get_query_params_json(
+                transcoded_request
+            )
+
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                request_url = "{host}{uri}".format(
+                    host=self._host, uri=transcoded_request["uri"]
+                )
+                method = transcoded_request["method"]
+                try:
+                    request_payload = type(request).to_json(request)
+                except Exception:  # logging is best-effort; never fail the RPC here
+                    request_payload = None
+                http_request = {
+                    "payload": request_payload,
+                    "requestMethod": method,
+                    "requestUrl": request_url,
+                    "headers": dict(metadata),
+                }
+                _LOGGER.debug(
+                    "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListDatabaseOperations",
+                    extra={
+                        "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+                        "rpcName": "ListDatabaseOperations",
+                        "httpRequest": http_request,
+                        "metadata": http_request["headers"],
+                    },
+                )
+
+            # Send the request
+            response = DatabaseAdminRestTransport._ListDatabaseOperations._get_response(
+                self._host,
+                metadata,
+                query_params,
+                self._session,
+                timeout,
+                transcoded_request,
+            )
+
+            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+            # subclass.
+            if response.status_code >= 400:
+                raise core_exceptions.from_http_response(response)
+
+            # Return the response
+            resp = spanner_database_admin.ListDatabaseOperationsResponse()
+            pb_resp = spanner_database_admin.ListDatabaseOperationsResponse.pb(resp)
+
+            json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+            resp = self._interceptor.post_list_database_operations(resp)
+            response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+            resp, _ = self._interceptor.post_list_database_operations_with_metadata(
+                resp, response_metadata
+            )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                try:
+                    response_payload = (
+                        spanner_database_admin.ListDatabaseOperationsResponse.to_json(
+                            response
+                        )
+                    )
+                except Exception:  # logging is best-effort; never fail the RPC here
+                    response_payload = None
+                http_response = {
+                    "payload": response_payload,
+                    "headers": dict(response.headers),
+                    "status": response.status_code,
+                }
+                _LOGGER.debug(
+                    "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_database_operations",
+                    extra={
+                        "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+                        "rpcName": "ListDatabaseOperations",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
+            return resp
+
+    class _ListDatabaseRoles(
+        _BaseDatabaseAdminRestTransport._BaseListDatabaseRoles, DatabaseAdminRestStub
+    ):
+        def __hash__(self):
+            return hash("DatabaseAdminRestTransport.ListDatabaseRoles")
+
+        @staticmethod
+        def _get_response(
+            host,
+            metadata,
+            query_params,
+            session,
+            timeout,
+            transcoded_request,
+            body=None,
+        ):
+            uri = transcoded_request["uri"]
+            method = transcoded_request["method"]
+            headers = dict(metadata)
+            headers["Content-Type"] = "application/json"
+            response = getattr(session, method)(
+                "{host}{uri}".format(host=host, uri=uri),
+                timeout=timeout,
+                headers=headers,
+                params=rest_helpers.flatten_query_params(query_params, strict=True),
+            )
+            return response
+
+        def __call__(
+            self,
+            request: spanner_database_admin.ListDatabaseRolesRequest,
+            *,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Optional[float] = None,
+            metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+        ) -> spanner_database_admin.ListDatabaseRolesResponse:
+            r"""Call the list database roles method over HTTP.
+
+            Args:
+                request (~.spanner_database_admin.ListDatabaseRolesRequest):
+                    The request object. The request for
+                    [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+                retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                    should be retried.
+                timeout (float): The timeout for this request.
+                metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                    sent along with the request as metadata. Normally, each value must be of type `str`,
+                    but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                    be of type `bytes`.
+
+            Returns:
+                ~.spanner_database_admin.ListDatabaseRolesResponse:
+                    The response for
+                    [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+
+            """
+
+            http_options = (
+                _BaseDatabaseAdminRestTransport._BaseListDatabaseRoles._get_http_options()
+            )
+
+            request, metadata = self._interceptor.pre_list_database_roles(
+                request, metadata
+            )
+            transcoded_request = _BaseDatabaseAdminRestTransport._BaseListDatabaseRoles._get_transcoded_request(
+                http_options, request
+            )
+
+            # Jsonify the query params
+            query_params = _BaseDatabaseAdminRestTransport._BaseListDatabaseRoles._get_query_params_json(
+                transcoded_request
+            )
+
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                request_url = "{host}{uri}".format(
+                    host=self._host, uri=transcoded_request["uri"]
+                )
+                method = transcoded_request["method"]
+                try:
+                    request_payload = type(request).to_json(request)
+                except Exception:  # logging is best-effort; never fail the RPC here
+                    request_payload = None
+                http_request = {
+                    "payload": request_payload,
+                    "requestMethod": method,
+                    "requestUrl": request_url,
+                    "headers": dict(metadata),
+                }
+                _LOGGER.debug(
+                    "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListDatabaseRoles",
+                    extra={
+                        "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+                        "rpcName": "ListDatabaseRoles",
+                        "httpRequest": http_request,
+                        "metadata": http_request["headers"],
+                    },
+                )
+
+            # Send the request
+            response = DatabaseAdminRestTransport._ListDatabaseRoles._get_response(
+                self._host,
+                metadata,
+                query_params,
+                self._session,
+                timeout,
+                transcoded_request,
+            )
+
+            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+            # subclass.
+            if response.status_code >= 400:
+                raise core_exceptions.from_http_response(response)
+
+            # Return the response
+            resp = spanner_database_admin.ListDatabaseRolesResponse()
+            pb_resp = spanner_database_admin.ListDatabaseRolesResponse.pb(resp)
+
+            json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+            resp = self._interceptor.post_list_database_roles(resp)
+            response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+            resp, _ = self._interceptor.post_list_database_roles_with_metadata(
+                resp, response_metadata
+            )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                try:
+                    response_payload = (
+                        spanner_database_admin.ListDatabaseRolesResponse.to_json(
+                            response
+                        )
+                    )
+                except Exception:  # logging is best-effort; never fail the RPC here
+                    response_payload = None
+                http_response = {
+                    "payload": response_payload,
+                    "headers": dict(response.headers),
+                    "status": response.status_code,
+                }
+                _LOGGER.debug(
+                    "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_database_roles",
+                    extra={
+                        "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+                        "rpcName": "ListDatabaseRoles",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
+            return resp
+
+    class _ListDatabases(
+        _BaseDatabaseAdminRestTransport._BaseListDatabases, DatabaseAdminRestStub
+    ):
+        def __hash__(self):
+            return hash("DatabaseAdminRestTransport.ListDatabases")
+
+        @staticmethod
+        def _get_response(
+            host,
+            metadata,
+            query_params,
+            session,
+            timeout,
+            transcoded_request,
+            body=None,
+        ):
+            uri = transcoded_request["uri"]
+            method = transcoded_request["method"]
+            headers = dict(metadata)
+            headers["Content-Type"] = "application/json"
+            response = getattr(session, method)(
+                "{host}{uri}".format(host=host, uri=uri),
+                timeout=timeout,
+                headers=headers,
+                params=rest_helpers.flatten_query_params(query_params, strict=True),
+            )
+            return response
+
+        def __call__(
+            self,
+            request: spanner_database_admin.ListDatabasesRequest,
+            *,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Optional[float] = None,
+            metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+        ) -> spanner_database_admin.ListDatabasesResponse:
+            r"""Call the list databases method over HTTP.
+
+            Args:
+                request (~.spanner_database_admin.ListDatabasesRequest):
+                    The request object. The request for
+                    [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
+                retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                    should be retried.
+                timeout (float): The timeout for this request.
+                metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                    sent along with the request as metadata. Normally, each value must be of type `str`,
+                    but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                    be of type `bytes`.
+
+            Returns:
+                ~.spanner_database_admin.ListDatabasesResponse:
+                    The response for
+                    [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
+
+            """
+
+            http_options = (
+                _BaseDatabaseAdminRestTransport._BaseListDatabases._get_http_options()
+            )
+
+            request, metadata = self._interceptor.pre_list_databases(request, metadata)
+            transcoded_request = _BaseDatabaseAdminRestTransport._BaseListDatabases._get_transcoded_request(
+                http_options, request
+            )
+
+            # Jsonify the query params
+            query_params = _BaseDatabaseAdminRestTransport._BaseListDatabases._get_query_params_json(
+                transcoded_request
+            )
+
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                request_url = "{host}{uri}".format(
+                    host=self._host, uri=transcoded_request["uri"]
+                )
+                method = transcoded_request["method"]
+                try:
+                    request_payload = type(request).to_json(request)
+                except Exception:  # logging is best-effort; never fail the RPC here
+                    request_payload = None
+                http_request = {
+                    "payload": request_payload,
+                    "requestMethod": method,
+                    "requestUrl": request_url,
+                    "headers": dict(metadata),
+                }
+                _LOGGER.debug(
+                    "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListDatabases",
+                    extra={
+                        "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+                        "rpcName": "ListDatabases",
+                        "httpRequest": http_request,
+                        "metadata": http_request["headers"],
+                    },
+                )
+
+            # Send the request
+            response = DatabaseAdminRestTransport._ListDatabases._get_response(
+                self._host,
+                metadata,
+                query_params,
+                self._session,
+                timeout,
+                transcoded_request,
+            )
+
+            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+            # subclass.
+            if response.status_code >= 400:
+                raise core_exceptions.from_http_response(response)
+
+            # Return the response
+            resp = spanner_database_admin.ListDatabasesResponse()
+            pb_resp = spanner_database_admin.ListDatabasesResponse.pb(resp)
+
+            json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+            resp = self._interceptor.post_list_databases(resp)
+            response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+            resp, _ = self._interceptor.post_list_databases_with_metadata(
+                resp, response_metadata
+            )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                try:
+                    response_payload = (
+                        spanner_database_admin.ListDatabasesResponse.to_json(response)
+                    )
+                except Exception:  # logging is best-effort; never fail the RPC here
+                    response_payload = None
+                http_response = {
+                    "payload": response_payload,
+                    "headers": dict(response.headers),
+                    "status": response.status_code,
+                }
+                _LOGGER.debug(
+                    "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_databases",
+                    extra={
+                        "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+                        "rpcName": "ListDatabases",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
+            return resp
+
+    class _RestoreDatabase(
+        _BaseDatabaseAdminRestTransport._BaseRestoreDatabase, DatabaseAdminRestStub
+    ):
+        def __hash__(self):
+            return hash("DatabaseAdminRestTransport.RestoreDatabase")
+
+        @staticmethod
+        def _get_response(
+            host,
+            metadata,
+            query_params,
+            session,
+            timeout,
+            transcoded_request,
+            body=None,
+        ):
+            uri = transcoded_request["uri"]
+            method = transcoded_request["method"]
+            headers = dict(metadata)
+            headers["Content-Type"] = "application/json"
+            response = getattr(session, method)(
+                "{host}{uri}".format(host=host, uri=uri),
+                timeout=timeout,
+                headers=headers,
+                params=rest_helpers.flatten_query_params(query_params, strict=True),
+                data=body,
+            )
+            return response
+
+        def __call__(
+            self,
+            request: spanner_database_admin.RestoreDatabaseRequest,
+            *,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Optional[float] = None,
+            metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+        ) -> operations_pb2.Operation:
+            r"""Call the restore database method over HTTP.
+
+            Args:
+                request (~.spanner_database_admin.RestoreDatabaseRequest):
+                    The request object. The request for
+                    [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
+                retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                    should be retried.
+                timeout (float): The timeout for this request.
+                metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                    sent along with the request as metadata. Normally, each value must be of type `str`,
+                    but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                    be of type `bytes`.
+
+            Returns:
+                ~.operations_pb2.Operation:
+                    This resource represents a
+                    long-running operation that is the
+                    result of a network API call.
+
+            """
+
+            http_options = (
+                _BaseDatabaseAdminRestTransport._BaseRestoreDatabase._get_http_options()
+            )
+
+            request, metadata = self._interceptor.pre_restore_database(
+                request, metadata
+            )
+            transcoded_request = _BaseDatabaseAdminRestTransport._BaseRestoreDatabase._get_transcoded_request(
+                http_options, request
+            )
+
+            body = _BaseDatabaseAdminRestTransport._BaseRestoreDatabase._get_request_body_json(
+                transcoded_request
+            )
+
+            # Jsonify the query params
+            query_params = _BaseDatabaseAdminRestTransport._BaseRestoreDatabase._get_query_params_json(
+                transcoded_request
+            )
+
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                request_url = "{host}{uri}".format(
+                    host=self._host, uri=transcoded_request["uri"]
+                )
+                method = transcoded_request["method"]
+                try:
+                    request_payload = json_format.MessageToJson(request)
+                except Exception:  # logging is best-effort; never fail the RPC here
+                    request_payload = None
+                http_request = {
+                    "payload": request_payload,
+                    "requestMethod": method,
+                    "requestUrl": request_url,
+                    "headers": dict(metadata),
+                }
+                _LOGGER.debug(
+                    "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.RestoreDatabase",
+                    extra={
+                        "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+                        "rpcName": "RestoreDatabase",
+                        "httpRequest": http_request,
+                        "metadata": http_request["headers"],
+                    },
+                )
+
+            # Send the request
+            response = DatabaseAdminRestTransport._RestoreDatabase._get_response(
+                self._host,
+                metadata,
+                query_params,
+                self._session,
+                timeout,
+                transcoded_request,
+                body,
+            )
+
+            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+            # subclass.
+            if response.status_code >= 400:
+                raise core_exceptions.from_http_response(response)
+
+            # Return the response
+            resp = operations_pb2.Operation()
+            json_format.Parse(response.content, resp, ignore_unknown_fields=True)
+
+            resp = self._interceptor.post_restore_database(resp)
+            response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+            resp, _ = self._interceptor.post_restore_database_with_metadata(
+                resp, response_metadata
+            )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                try:
+                    response_payload = json_format.MessageToJson(resp)
+                except Exception:  # logging is best-effort; never fail the RPC here
+                    response_payload = None
+                http_response = {
+                    "payload": response_payload,
+                    "headers": dict(response.headers),
+                    "status": response.status_code,
+                }
+                _LOGGER.debug(
+                    "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.restore_database",
+                    extra={
+                        "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+                        "rpcName": "RestoreDatabase",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
+            return resp
+
+ class _SetIamPolicy(
+ _BaseDatabaseAdminRestTransport._BaseSetIamPolicy, DatabaseAdminRestStub
+ ):
+ """REST stub for the SetIamPolicy RPC: transcodes the request, sends it over HTTP, and parses the Policy response."""
+
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.SetIamPolicy")
+
+ # Issues the HTTP request using the session method named in the
+ # transcoded request (e.g. "post"), with metadata as headers.
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: iam_policy_pb2.SetIamPolicyRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> policy_pb2.Policy:
+ r"""Call the set iam policy method over HTTP.
+
+ Args:
+ request (~.iam_policy_pb2.SetIamPolicyRequest):
+ The request object. Request message for ``SetIamPolicy`` method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.policy_pb2.Policy:
+ An Identity and Access Management (IAM) policy, which
+ specifies access controls for Google Cloud resources.
+
+ A ``Policy`` is a collection of ``bindings``. A
+ ``binding`` binds one or more ``members``, or
+ principals, to a single ``role``. Principals can be user
+ accounts, service accounts, Google groups, and domains
+ (such as G Suite). A ``role`` is a named list of
+ permissions; each ``role`` can be an IAM predefined role
+ or a user-created custom role.
+
+ For some types of Google Cloud resources, a ``binding``
+ can also specify a ``condition``, which is a logical
+ expression that allows access to a resource only if the
+ expression evaluates to ``true``. A condition can add
+ constraints based on attributes of the request, the
+ resource, or both. To learn which resources support
+ conditions in their IAM policies, see the `IAM
+ documentation `__.
+
+ **JSON example:**
+
+ ::
+
+ {
+ "bindings": [
+ {
+ "role": "roles/resourcemanager.organizationAdmin",
+ "members": [
+ "user:mike@example.com",
+ "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ]
+ },
+ {
+ "role": "roles/resourcemanager.organizationViewer",
+ "members": [
+ "user:eve@example.com"
+ ],
+ "condition": {
+ "title": "expirable access",
+ "description": "Does not grant access after Sep 2020",
+ "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')",
+ }
+ }
+ ],
+ "etag": "BwWWja0YfJA=",
+ "version": 3
+ }
+
+ **YAML example:**
+
+ ::
+
+ bindings:
+ - members:
+ - user:mike@example.com
+ - group:admins@example.com
+ - domain:google.com
+ - serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin
+ - members:
+ - user:eve@example.com
+ role: roles/resourcemanager.organizationViewer
+ condition:
+ title: expirable access
+ description: Does not grant access after Sep 2020
+ expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+ etag: BwWWja0YfJA=
+ version: 3
+
+ For a description of IAM and its features, see the `IAM
+ documentation `__.
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseSetIamPolicy._get_http_options()
+ )
+
+ # Give interceptors a chance to mutate request/metadata before transcoding.
+ request, metadata = self._interceptor.pre_set_iam_policy(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseSetIamPolicy._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseSetIamPolicy._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseSetIamPolicy._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ # Best-effort payload serialization: logging must never break the RPC.
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.SetIamPolicy",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "SetIamPolicy",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._SetIamPolicy._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = policy_pb2.Policy()
+ pb_resp = resp
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ # Post-processing hooks run after parsing, before returning to the caller.
+ resp = self._interceptor.post_set_iam_policy(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_set_iam_policy_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.set_iam_policy",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "SetIamPolicy",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _TestIamPermissions(
+ _BaseDatabaseAdminRestTransport._BaseTestIamPermissions, DatabaseAdminRestStub
+ ):
+ """REST stub for the TestIamPermissions RPC: transcodes the request, sends it over HTTP, and parses the response."""
+
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.TestIamPermissions")
+
+ # Issues the HTTP request using the session method named in the
+ # transcoded request (e.g. "post"), with metadata as headers.
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: iam_policy_pb2.TestIamPermissionsRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> iam_policy_pb2.TestIamPermissionsResponse:
+ r"""Call the test iam permissions method over HTTP.
+
+ Args:
+ request (~.iam_policy_pb2.TestIamPermissionsRequest):
+ The request object. Request message for ``TestIamPermissions`` method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.iam_policy_pb2.TestIamPermissionsResponse:
+ Response message for ``TestIamPermissions`` method.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseTestIamPermissions._get_http_options()
+ )
+
+ # Give interceptors a chance to mutate request/metadata before transcoding.
+ request, metadata = self._interceptor.pre_test_iam_permissions(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseTestIamPermissions._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseTestIamPermissions._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseTestIamPermissions._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ # Best-effort payload serialization: logging must never break the RPC.
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.TestIamPermissions",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "TestIamPermissions",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._TestIamPermissions._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = iam_policy_pb2.TestIamPermissionsResponse()
+ pb_resp = resp
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ # Post-processing hooks run after parsing, before returning to the caller.
+ resp = self._interceptor.post_test_iam_permissions(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_test_iam_permissions_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.test_iam_permissions",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "TestIamPermissions",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _UpdateBackup(
+ _BaseDatabaseAdminRestTransport._BaseUpdateBackup, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.UpdateBackup")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: gsad_backup.UpdateBackupRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> gsad_backup.Backup:
+ r"""Call the update backup method over HTTP.
+
+ Args:
+ request (~.gsad_backup.UpdateBackupRequest):
+ The request object. The request for
+ [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.gsad_backup.Backup:
+ A backup of a Cloud Spanner database.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseUpdateBackup._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_update_backup(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseUpdateBackup._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseUpdateBackup._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseUpdateBackup._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.UpdateBackup",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "UpdateBackup",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._UpdateBackup._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = gsad_backup.Backup()
+ pb_resp = gsad_backup.Backup.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_update_backup(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_update_backup_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = gsad_backup.Backup.to_json(response)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.update_backup",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "UpdateBackup",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _UpdateBackupSchedule(
+ _BaseDatabaseAdminRestTransport._BaseUpdateBackupSchedule, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.UpdateBackupSchedule")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: gsad_backup_schedule.UpdateBackupScheduleRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> gsad_backup_schedule.BackupSchedule:
+ r"""Call the update backup schedule method over HTTP.
+
+ Args:
+ request (~.gsad_backup_schedule.UpdateBackupScheduleRequest):
+ The request object. The request for
+ [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.gsad_backup_schedule.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseUpdateBackupSchedule._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_update_backup_schedule(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseUpdateBackupSchedule._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseUpdateBackupSchedule._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseUpdateBackupSchedule._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.UpdateBackupSchedule",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "UpdateBackupSchedule",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._UpdateBackupSchedule._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = gsad_backup_schedule.BackupSchedule()
+ pb_resp = gsad_backup_schedule.BackupSchedule.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_update_backup_schedule(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_update_backup_schedule_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = gsad_backup_schedule.BackupSchedule.to_json(
+ response
+ )
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.update_backup_schedule",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "UpdateBackupSchedule",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _UpdateDatabase(
+ _BaseDatabaseAdminRestTransport._BaseUpdateDatabase, DatabaseAdminRestStub
+ ):
+ """REST stub for the UpdateDatabase RPC: transcodes the request, sends it over HTTP, and parses the long-running Operation response."""
+
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.UpdateDatabase")
+
+ # Issues the HTTP request using the session method named in the
+ # transcoded request (e.g. "patch"), with metadata as headers.
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.UpdateDatabaseRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.Operation:
+ r"""Call the update database method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.UpdateDatabaseRequest):
+ The request object. The request for
+ [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.operations_pb2.Operation:
+ This resource represents a
+ long-running operation that is the
+ result of a network API call.
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseUpdateDatabase._get_http_options()
+ )
+
+ # Give interceptors a chance to mutate request/metadata before transcoding.
+ request, metadata = self._interceptor.pre_update_database(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseUpdateDatabase._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseUpdateDatabase._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseUpdateDatabase._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ # Best-effort payload serialization: logging must never break the RPC.
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.UpdateDatabase",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "UpdateDatabase",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._UpdateDatabase._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = operations_pb2.Operation()
+ json_format.Parse(response.content, resp, ignore_unknown_fields=True)
+
+ # Post-processing hooks run after parsing, before returning to the caller.
+ resp = self._interceptor.post_update_database(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_update_database_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.update_database",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "UpdateDatabase",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _UpdateDatabaseDdl(
+ _BaseDatabaseAdminRestTransport._BaseUpdateDatabaseDdl, DatabaseAdminRestStub
+ ):
+ """REST stub for the UpdateDatabaseDdl RPC: transcodes the request, sends it over HTTP, and parses the long-running Operation response."""
+
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.UpdateDatabaseDdl")
+
+ # Issues the HTTP request using the session method named in the
+ # transcoded request (e.g. "patch"), with metadata as headers.
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.UpdateDatabaseDdlRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.Operation:
+ r"""Call the update database ddl method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.UpdateDatabaseDdlRequest):
+ The request object. Enqueues the given DDL statements to be applied, in
+ order but not necessarily all at once, to the database
+ schema at some point (or points) in the future. The
+ server checks that the statements are executable
+ (syntactically valid, name tables that exist, etc.)
+ before enqueueing them, but they may still fail upon
+ later execution (e.g., if a statement from another batch
+ of statements is applied first and it conflicts in some
+ way, or if there is some data-related problem like a
+ ``NULL`` value in a column to which ``NOT NULL`` would
+ be added). If a statement fails, all subsequent
+ statements in the batch are automatically cancelled.
+
+ Each batch of statements is assigned a name which can be
+ used with the
+ [Operations][google.longrunning.Operations] API to
+ monitor progress. See the
+ [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id]
+ field for more details.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.operations_pb2.Operation:
+ This resource represents a
+ long-running operation that is the
+ result of a network API call.
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseUpdateDatabaseDdl._get_http_options()
+ )
+
+ # Give interceptors a chance to mutate request/metadata before transcoding.
+ request, metadata = self._interceptor.pre_update_database_ddl(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseUpdateDatabaseDdl._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseUpdateDatabaseDdl._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseUpdateDatabaseDdl._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ # Best-effort payload serialization: logging must never break the RPC.
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.UpdateDatabaseDdl",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "UpdateDatabaseDdl",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._UpdateDatabaseDdl._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = operations_pb2.Operation()
+ json_format.Parse(response.content, resp, ignore_unknown_fields=True)
+
+ # Post-processing hooks run after parsing, before returning to the caller.
+ resp = self._interceptor.post_update_database_ddl(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_update_database_ddl_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.update_database_ddl",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "UpdateDatabaseDdl",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ @property
+ def add_split_points(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.AddSplitPointsRequest],
+ spanner_database_admin.AddSplitPointsResponse,
+ ]:
+ # Constructs the AddSplitPoints stub bound to this transport's session/host/interceptor.
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._AddSplitPoints(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def copy_backup(
+ self,
+ ) -> Callable[[backup.CopyBackupRequest], operations_pb2.Operation]:
+ # Constructs the CopyBackup stub bound to this transport's session/host/interceptor.
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._CopyBackup(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def create_backup(
+ self,
+ ) -> Callable[[gsad_backup.CreateBackupRequest], operations_pb2.Operation]:
+ # Constructs the CreateBackup stub bound to this transport's session/host/interceptor.
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._CreateBackup(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def create_backup_schedule(
+ self,
+ ) -> Callable[
+ [gsad_backup_schedule.CreateBackupScheduleRequest],
+ gsad_backup_schedule.BackupSchedule,
+ ]:
+ # Constructs the CreateBackupSchedule stub bound to this transport's session/host/interceptor.
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._CreateBackupSchedule(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def create_database(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.CreateDatabaseRequest], operations_pb2.Operation
+ ]:
+ # Constructs the CreateDatabase stub bound to this transport's session/host/interceptor.
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._CreateDatabase(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def delete_backup(self) -> Callable[[backup.DeleteBackupRequest], empty_pb2.Empty]:
+ # Constructs the DeleteBackup stub bound to this transport's session/host/interceptor.
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._DeleteBackup(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def delete_backup_schedule(
+ self,
+ ) -> Callable[[backup_schedule.DeleteBackupScheduleRequest], empty_pb2.Empty]:
+ # Constructs the DeleteBackupSchedule stub bound to this transport's session/host/interceptor.
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._DeleteBackupSchedule(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def drop_database(
+ self,
+ ) -> Callable[[spanner_database_admin.DropDatabaseRequest], empty_pb2.Empty]:
+ # Constructs the DropDatabase stub bound to this transport's session/host/interceptor.
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._DropDatabase(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def get_backup(self) -> Callable[[backup.GetBackupRequest], backup.Backup]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._GetBackup(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def get_backup_schedule(
+ self,
+ ) -> Callable[
+ [backup_schedule.GetBackupScheduleRequest], backup_schedule.BackupSchedule
+ ]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._GetBackupSchedule(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def get_database(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.GetDatabaseRequest], spanner_database_admin.Database
+ ]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._GetDatabase(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def get_database_ddl(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.GetDatabaseDdlRequest],
+ spanner_database_admin.GetDatabaseDdlResponse,
+ ]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._GetDatabaseDdl(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def get_iam_policy(
+ self,
+ ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def internal_update_graph_operation(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.InternalUpdateGraphOperationRequest],
+ spanner_database_admin.InternalUpdateGraphOperationResponse,
+ ]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._InternalUpdateGraphOperation(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def list_backup_operations(
+ self,
+ ) -> Callable[
+ [backup.ListBackupOperationsRequest], backup.ListBackupOperationsResponse
+ ]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._ListBackupOperations(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def list_backups(
+ self,
+ ) -> Callable[[backup.ListBackupsRequest], backup.ListBackupsResponse]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._ListBackups(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def list_backup_schedules(
+ self,
+ ) -> Callable[
+ [backup_schedule.ListBackupSchedulesRequest],
+ backup_schedule.ListBackupSchedulesResponse,
+ ]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._ListBackupSchedules(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def list_database_operations(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.ListDatabaseOperationsRequest],
+ spanner_database_admin.ListDatabaseOperationsResponse,
+ ]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._ListDatabaseOperations(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def list_database_roles(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.ListDatabaseRolesRequest],
+ spanner_database_admin.ListDatabaseRolesResponse,
+ ]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._ListDatabaseRoles(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def list_databases(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.ListDatabasesRequest],
+ spanner_database_admin.ListDatabasesResponse,
+ ]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._ListDatabases(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def restore_database(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.RestoreDatabaseRequest], operations_pb2.Operation
+ ]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._RestoreDatabase(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def set_iam_policy(
+ self,
+ ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def test_iam_permissions(
+ self,
+ ) -> Callable[
+ [iam_policy_pb2.TestIamPermissionsRequest],
+ iam_policy_pb2.TestIamPermissionsResponse,
+ ]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def update_backup(
+ self,
+ ) -> Callable[[gsad_backup.UpdateBackupRequest], gsad_backup.Backup]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._UpdateBackup(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def update_backup_schedule(
+ self,
+ ) -> Callable[
+ [gsad_backup_schedule.UpdateBackupScheduleRequest],
+ gsad_backup_schedule.BackupSchedule,
+ ]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._UpdateBackupSchedule(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def update_database(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.UpdateDatabaseRequest], operations_pb2.Operation
+ ]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._UpdateDatabase(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def update_database_ddl(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.UpdateDatabaseDdlRequest], operations_pb2.Operation
+ ]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._UpdateDatabaseDdl(self._session, self._host, self._interceptor) # type: ignore
+
+ @property
+ def cancel_operation(self):
+ return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore
+
+ class _CancelOperation(
+ _BaseDatabaseAdminRestTransport._BaseCancelOperation, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.CancelOperation")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: operations_pb2.CancelOperationRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Call the cancel operation method over HTTP.
+
+ Args:
+ request (operations_pb2.CancelOperationRequest):
+ The request object for CancelOperation method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseCancelOperation._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_cancel_operation(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseCancelOperation._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseCancelOperation._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.CancelOperation",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "CancelOperation",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._CancelOperation._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ return self._interceptor.post_cancel_operation(None)
+
+ @property
+ def delete_operation(self):
+ return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore
+
+ class _DeleteOperation(
+ _BaseDatabaseAdminRestTransport._BaseDeleteOperation, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.DeleteOperation")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: operations_pb2.DeleteOperationRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Call the delete operation method over HTTP.
+
+ Args:
+ request (operations_pb2.DeleteOperationRequest):
+ The request object for DeleteOperation method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseDeleteOperation._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_delete_operation(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseDeleteOperation._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseDeleteOperation._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.DeleteOperation",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "DeleteOperation",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._DeleteOperation._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ return self._interceptor.post_delete_operation(None)
+
+ @property
+ def get_operation(self):
+ return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore
+
+ class _GetOperation(
+ _BaseDatabaseAdminRestTransport._BaseGetOperation, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.GetOperation")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: operations_pb2.GetOperationRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.Operation:
+ r"""Call the get operation method over HTTP.
+
+ Args:
+ request (operations_pb2.GetOperationRequest):
+ The request object for GetOperation method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ operations_pb2.Operation: Response from GetOperation method.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseGetOperation._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_get_operation(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseGetOperation._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseGetOperation._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetOperation",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetOperation",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._GetOperation._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ content = response.content.decode("utf-8")
+ resp = operations_pb2.Operation()
+ resp = json_format.Parse(content, resp)
+ resp = self._interceptor.post_get_operation(resp)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminAsyncClient.GetOperation",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetOperation",
+ "httpResponse": http_response,
+ "metadata": http_response["headers"],
+ },
+ )
+ return resp
+
+ @property
+ def list_operations(self):
+ return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore
+
+ class _ListOperations(
+ _BaseDatabaseAdminRestTransport._BaseListOperations, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.ListOperations")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: operations_pb2.ListOperationsRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.ListOperationsResponse:
+ r"""Call the list operations method over HTTP.
+
+ Args:
+ request (operations_pb2.ListOperationsRequest):
+ The request object for ListOperations method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ operations_pb2.ListOperationsResponse: Response from ListOperations method.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseListOperations._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_list_operations(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseListOperations._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseListOperations._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListOperations",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListOperations",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._ListOperations._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ content = response.content.decode("utf-8")
+ resp = operations_pb2.ListOperationsResponse()
+ resp = json_format.Parse(content, resp)
+ resp = self._interceptor.post_list_operations(resp)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminAsyncClient.ListOperations",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListOperations",
+ "httpResponse": http_response,
+ "metadata": http_response["headers"],
+ },
+ )
+ return resp
+
+ @property
+ def kind(self) -> str:
+ return "rest"
+
+ def close(self):
+ self._session.close()
+
+
+__all__ = ("DatabaseAdminRestTransport",)
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest_base.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest_base.py
new file mode 100644
index 0000000000..d0ee0a2cbb
--- /dev/null
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest_base.py
@@ -0,0 +1,1654 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import json # type: ignore
+from google.api_core import path_template
+from google.api_core import gapic_v1
+
+from google.protobuf import json_format
+from .base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO
+
+import re
+from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
+
+
+from google.cloud.spanner_admin_database_v1.types import backup
+from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as gsad_backup_schedule,
+)
+from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.protobuf import empty_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+
+
+class _BaseDatabaseAdminRestTransport(DatabaseAdminTransport):
+ """Base REST backend transport for DatabaseAdmin.
+
+ Note: This class is not meant to be used directly. Use its sync and
+ async sub-classes instead.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends JSON representations of protocol buffers over HTTP/1.1
+ """
+
+ def __init__(
+ self,
+ *,
+ host: str = "spanner.googleapis.com",
+ credentials: Optional[Any] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
+ url_scheme: str = "https",
+ api_audience: Optional[str] = None,
+ ) -> None:
+ """Instantiate the transport.
+ Args:
+ host (Optional[str]):
+ The hostname to connect to (default: 'spanner.googleapis.com').
+ credentials (Optional[Any]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you are developing
+ your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
+ url_scheme: the protocol scheme for the API endpoint. Normally
+ "https", but for testing or local servers,
+ "http" can be specified.
+ """
+ # Run the base constructor
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+ if maybe_url_match is None:
+ raise ValueError(
+ f"Unexpected hostname structure: {host}"
+ ) # pragma: NO COVER
+
+ url_match_items = maybe_url_match.groupdict()
+
+ host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
+ api_audience=api_audience,
+ )
+
+ class _BaseAddSplitPoints:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{database=projects/*/instances/*/databases/*}:addSplitPoints",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.AddSplitPointsRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseAddSplitPoints._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseCopyBackup:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{parent=projects/*/instances/*}/backups:copy",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup.CopyBackupRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseCopyBackup._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseCreateBackup:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+ "backupId": "",
+ }
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{parent=projects/*/instances/*}/backups",
+ "body": "backup",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = gsad_backup.CreateBackupRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseCreateBackup._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseCreateBackupSchedule:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+ "backupScheduleId": "",
+ }
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{parent=projects/*/instances/*/databases/*}/backupSchedules",
+ "body": "backup_schedule",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = gsad_backup_schedule.CreateBackupScheduleRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseCreateBackupSchedule._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseCreateDatabase:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{parent=projects/*/instances/*}/databases",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.CreateDatabaseRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseCreateDatabase._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseDeleteBackup:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup.DeleteBackupRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseDeleteBackup._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseDeleteBackupSchedule:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/backupSchedules/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup_schedule.DeleteBackupScheduleRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseDeleteBackupSchedule._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseDropDatabase:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "delete",
+ "uri": "/v1/{database=projects/*/instances/*/databases/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.DropDatabaseRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseDropDatabase._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseGetBackup:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup.GetBackupRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseGetBackup._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseGetBackupSchedule:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/backupSchedules/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup_schedule.GetBackupScheduleRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseGetBackupSchedule._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseGetDatabase:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.GetDatabaseRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseGetDatabase._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseGetDatabaseDdl:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{database=projects/*/instances/*/databases/*}/ddl",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.GetDatabaseDdlRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseGetDatabaseDdl._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseGetIamPolicy:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/databases/*}:getIamPolicy",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/backups/*}:getIamPolicy",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:getIamPolicy",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = request
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseGetIamPolicy._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseInternalUpdateGraphOperation:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ class _BaseListBackupOperations:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{parent=projects/*/instances/*}/backupOperations",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup.ListBackupOperationsRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseListBackupOperations._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseListBackups:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{parent=projects/*/instances/*}/backups",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup.ListBackupsRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseListBackups._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseListBackupSchedules:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{parent=projects/*/instances/*/databases/*}/backupSchedules",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup_schedule.ListBackupSchedulesRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseListBackupSchedules._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseListDatabaseOperations:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{parent=projects/*/instances/*}/databaseOperations",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.ListDatabaseOperationsRequest.pb(
+ request
+ )
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseListDatabaseOperations._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseListDatabaseRoles:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{parent=projects/*/instances/*/databases/*}/databaseRoles",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.ListDatabaseRolesRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseListDatabaseRoles._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseListDatabases:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{parent=projects/*/instances/*}/databases",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.ListDatabasesRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseListDatabases._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseRestoreDatabase:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{parent=projects/*/instances/*}/databases:restore",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.RestoreDatabaseRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseRestoreDatabase._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseSetIamPolicy:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/databases/*}:setIamPolicy",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/backups/*}:setIamPolicy",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:setIamPolicy",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = request
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseSetIamPolicy._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseTestIamPermissions:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/databases/*}:testIamPermissions",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/backups/*}:testIamPermissions",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:testIamPermissions",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/databases/*/databaseRoles/*}:testIamPermissions",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = request
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseTestIamPermissions._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseUpdateBackup:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+ "updateMask": {},
+ }
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "patch",
+ "uri": "/v1/{backup.name=projects/*/instances/*/backups/*}",
+ "body": "backup",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = gsad_backup.UpdateBackupRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseUpdateBackup._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseUpdateBackupSchedule:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+ "updateMask": {},
+ }
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "patch",
+ "uri": "/v1/{backup_schedule.name=projects/*/instances/*/databases/*/backupSchedules/*}",
+ "body": "backup_schedule",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = gsad_backup_schedule.UpdateBackupScheduleRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseUpdateBackupSchedule._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseUpdateDatabase:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+ "updateMask": {},
+ }
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "patch",
+ "uri": "/v1/{database.name=projects/*/instances/*/databases/*}",
+ "body": "database",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.UpdateDatabaseRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseUpdateDatabase._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseUpdateDatabaseDdl:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "patch",
+ "uri": "/v1/{database=projects/*/instances/*/databases/*}/ddl",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.UpdateDatabaseDdlRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseUpdateDatabaseDdl._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseCancelOperation:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/operations/*}:cancel",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{name=projects/*/instances/*/operations/*}:cancel",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*/operations/*}:cancel",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{name=projects/*/instanceConfigs/*/operations/*}:cancel",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ request_kwargs = json_format.MessageToDict(request)
+ transcoded_request = path_template.transcode(http_options, **request_kwargs)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(json.dumps(transcoded_request["query_params"]))
+ return query_params
+
+ class _BaseDeleteOperation:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/operations/*}",
+ },
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instances/*/operations/*}",
+ },
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*/operations/*}",
+ },
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instanceConfigs/*/operations/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ request_kwargs = json_format.MessageToDict(request)
+ transcoded_request = path_template.transcode(http_options, **request_kwargs)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(json.dumps(transcoded_request["query_params"]))
+ return query_params
+
+ class _BaseGetOperation:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/operations/*}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/operations/*}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*/operations/*}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instanceConfigs/*/operations/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ request_kwargs = json_format.MessageToDict(request)
+ transcoded_request = path_template.transcode(http_options, **request_kwargs)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(json.dumps(transcoded_request["query_params"]))
+ return query_params
+
+ class _BaseListOperations:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/operations}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/operations}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*/operations}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instanceConfigs/*/operations}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ request_kwargs = json_format.MessageToDict(request)
+ transcoded_request = path_template.transcode(http_options, **request_kwargs)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(json.dumps(transcoded_request["query_params"]))
+ return query_params
+
+
+__all__ = ("_BaseDatabaseAdminRestTransport",)
diff --git a/google/cloud/spanner_admin_database_v1/types/__init__.py b/google/cloud/spanner_admin_database_v1/types/__init__.py
index 1c31fe536e..ca79ddec90 100644
--- a/google/cloud/spanner_admin_database_v1/types/__init__.py
+++ b/google/cloud/spanner_admin_database_v1/types/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,32 +16,58 @@
from .backup import (
Backup,
BackupInfo,
+ BackupInstancePartition,
+ CopyBackupEncryptionConfig,
+ CopyBackupMetadata,
+ CopyBackupRequest,
CreateBackupEncryptionConfig,
CreateBackupMetadata,
CreateBackupRequest,
DeleteBackupRequest,
+ FullBackupSpec,
GetBackupRequest,
+ IncrementalBackupSpec,
ListBackupOperationsRequest,
ListBackupOperationsResponse,
ListBackupsRequest,
ListBackupsResponse,
UpdateBackupRequest,
)
+from .backup_schedule import (
+ BackupSchedule,
+ BackupScheduleSpec,
+ CreateBackupScheduleRequest,
+ CrontabSpec,
+ DeleteBackupScheduleRequest,
+ GetBackupScheduleRequest,
+ ListBackupSchedulesRequest,
+ ListBackupSchedulesResponse,
+ UpdateBackupScheduleRequest,
+)
from .common import (
EncryptionConfig,
EncryptionInfo,
OperationProgress,
+ DatabaseDialect,
)
from .spanner_database_admin import (
+ AddSplitPointsRequest,
+ AddSplitPointsResponse,
CreateDatabaseMetadata,
CreateDatabaseRequest,
Database,
+ DatabaseRole,
+ DdlStatementActionInfo,
DropDatabaseRequest,
GetDatabaseDdlRequest,
GetDatabaseDdlResponse,
GetDatabaseRequest,
+ InternalUpdateGraphOperationRequest,
+ InternalUpdateGraphOperationResponse,
ListDatabaseOperationsRequest,
ListDatabaseOperationsResponse,
+ ListDatabaseRolesRequest,
+ ListDatabaseRolesResponse,
ListDatabasesRequest,
ListDatabasesResponse,
OptimizeRestoredDatabaseMetadata,
@@ -49,36 +75,63 @@
RestoreDatabaseMetadata,
RestoreDatabaseRequest,
RestoreInfo,
+ SplitPoints,
UpdateDatabaseDdlMetadata,
UpdateDatabaseDdlRequest,
+ UpdateDatabaseMetadata,
+ UpdateDatabaseRequest,
RestoreSourceType,
)
__all__ = (
"Backup",
"BackupInfo",
+ "BackupInstancePartition",
+ "CopyBackupEncryptionConfig",
+ "CopyBackupMetadata",
+ "CopyBackupRequest",
"CreateBackupEncryptionConfig",
"CreateBackupMetadata",
"CreateBackupRequest",
"DeleteBackupRequest",
+ "FullBackupSpec",
"GetBackupRequest",
+ "IncrementalBackupSpec",
"ListBackupOperationsRequest",
"ListBackupOperationsResponse",
"ListBackupsRequest",
"ListBackupsResponse",
"UpdateBackupRequest",
+ "BackupSchedule",
+ "BackupScheduleSpec",
+ "CreateBackupScheduleRequest",
+ "CrontabSpec",
+ "DeleteBackupScheduleRequest",
+ "GetBackupScheduleRequest",
+ "ListBackupSchedulesRequest",
+ "ListBackupSchedulesResponse",
+ "UpdateBackupScheduleRequest",
"EncryptionConfig",
"EncryptionInfo",
"OperationProgress",
+ "DatabaseDialect",
+ "AddSplitPointsRequest",
+ "AddSplitPointsResponse",
"CreateDatabaseMetadata",
"CreateDatabaseRequest",
"Database",
+ "DatabaseRole",
+ "DdlStatementActionInfo",
"DropDatabaseRequest",
"GetDatabaseDdlRequest",
"GetDatabaseDdlResponse",
"GetDatabaseRequest",
+ "InternalUpdateGraphOperationRequest",
+ "InternalUpdateGraphOperationResponse",
"ListDatabaseOperationsRequest",
"ListDatabaseOperationsResponse",
+ "ListDatabaseRolesRequest",
+ "ListDatabaseRolesResponse",
"ListDatabasesRequest",
"ListDatabasesResponse",
"OptimizeRestoredDatabaseMetadata",
@@ -86,7 +139,10 @@
"RestoreDatabaseMetadata",
"RestoreDatabaseRequest",
"RestoreInfo",
+ "SplitPoints",
"UpdateDatabaseDdlMetadata",
"UpdateDatabaseDdlRequest",
+ "UpdateDatabaseMetadata",
+ "UpdateDatabaseRequest",
"RestoreSourceType",
)
diff --git a/google/cloud/spanner_admin_database_v1/types/backup.py b/google/cloud/spanner_admin_database_v1/types/backup.py
index 0ddc815570..da236fb4ff 100644
--- a/google/cloud/spanner_admin_database_v1/types/backup.py
+++ b/google/cloud/spanner_admin_database_v1/types/backup.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,6 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from __future__ import annotations
+
+from typing import MutableMapping, MutableSequence
+
import proto # type: ignore
from google.cloud.spanner_admin_database_v1.types import common
@@ -27,6 +31,8 @@
"Backup",
"CreateBackupRequest",
"CreateBackupMetadata",
+ "CopyBackupRequest",
+ "CopyBackupMetadata",
"UpdateBackupRequest",
"GetBackupRequest",
"DeleteBackupRequest",
@@ -36,12 +42,17 @@
"ListBackupOperationsResponse",
"BackupInfo",
"CreateBackupEncryptionConfig",
+ "CopyBackupEncryptionConfig",
+ "FullBackupSpec",
+ "IncrementalBackupSpec",
+ "BackupInstancePartition",
},
)
class Backup(proto.Message):
r"""A backup of a Cloud Spanner database.
+
Attributes:
database (str):
Required for the
@@ -89,9 +100,33 @@ class Backup(proto.Message):
equivalent to the ``create_time``.
size_bytes (int):
Output only. Size of the backup in bytes.
+ freeable_size_bytes (int):
+ Output only. The number of bytes that will be
+ freed by deleting this backup. This value will
+ be zero if, for example, this backup is part of
+ an incremental backup chain and younger backups
+ in the chain require that we keep its data. For
+ backups not in an incremental backup chain, this
+ is always the size of the backup. This value may
+ change if backups on the same chain get created,
+ deleted or expired.
+ exclusive_size_bytes (int):
+ Output only. For a backup in an incremental
+ backup chain, this is the storage space needed
+ to keep the data that has changed since the
+ previous backup. For all other backups, this is
+ always the size of the backup. This value may
+ change if backups on the same chain get deleted
+ or expired.
+
+ This field can be used to calculate the total
+ storage space used by a set of backups. For
+ example, the total space used by all backups of
+ a database can be computed by summing up this
+ field.
state (google.cloud.spanner_admin_database_v1.types.Backup.State):
Output only. The current state of the backup.
- referencing_databases (Sequence[str]):
+ referencing_databases (MutableSequence[str]):
Output only. The names of the restored databases that
reference the backup. The database names are of the form
``projects//instances//databases/``.
@@ -103,26 +138,181 @@ class Backup(proto.Message):
encryption_info (google.cloud.spanner_admin_database_v1.types.EncryptionInfo):
Output only. The encryption information for
the backup.
+ encryption_information (MutableSequence[google.cloud.spanner_admin_database_v1.types.EncryptionInfo]):
+ Output only. The encryption information for the backup,
+ whether it is protected by one or more KMS keys. The
+ information includes all Cloud KMS key versions used to
+ encrypt the backup. The
+ ``encryption_status`` field inside of each ``EncryptionInfo``
+ is not populated. At least one of the key versions must be
+ available for the backup to be restored. If a key version is
+ revoked in the middle of a restore, the restore behavior is
+ undefined.
+ database_dialect (google.cloud.spanner_admin_database_v1.types.DatabaseDialect):
+ Output only. The database dialect information
+ for the backup.
+ referencing_backups (MutableSequence[str]):
+ Output only. The names of the destination backups being
+ created by copying this source backup. The backup names are
+ of the form
+ ``projects//instances//backups/``.
+ Referencing backups may exist in different instances. The
+ existence of any referencing backup prevents the backup from
+ being deleted. When the copy operation is done (either
+ successfully completed or cancelled or the destination
+ backup is deleted), the reference to the backup is removed.
+ max_expire_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. The max allowed expiration time of the backup,
+ with microseconds granularity. A backup's expiration time
+ can be configured in multiple APIs: CreateBackup,
+ UpdateBackup, CopyBackup. When updating or copying an
+ existing backup, the expiration time specified must be less
+ than ``Backup.max_expire_time``.
+ backup_schedules (MutableSequence[str]):
+ Output only. List of backup schedule URIs
+ that are associated with creating this backup.
+ This is only applicable for scheduled backups,
+ and is empty for on-demand backups.
+
+ To optimize for storage, whenever possible,
+ multiple schedules are collapsed together to
+ create one backup. In such cases, this field
+ captures the list of all backup schedule URIs
+ that are associated with creating this backup.
+ If collapsing is not done, then this field
+ captures the single backup schedule URI
+ associated with creating this backup.
+ incremental_backup_chain_id (str):
+ Output only. Populated only for backups in an incremental
+ backup chain. Backups share the same chain id if and only if
+ they belong to the same incremental backup chain. Use this
+ field to determine which backups are part of the same
+ incremental backup chain. The ordering of backups in the
+ chain can be determined by ordering the backup
+ ``version_time``.
+ oldest_version_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Data deleted at a time older
+ than this is guaranteed not to be retained in
+ order to support this backup. For a backup in an
+ incremental backup chain, this is the version
+ time of the oldest backup that exists or ever
+ existed in the chain. For all other backups,
+ this is the version time of the backup. This
+ field can be used to understand what data is
+ being retained by the backup system.
+ instance_partitions (MutableSequence[google.cloud.spanner_admin_database_v1.types.BackupInstancePartition]):
+ Output only. The instance partition(s) storing the backup.
+
+ This is the same as the list of the instance partition(s)
+ that the database had footprint in at the backup's
+ ``version_time``.
"""
class State(proto.Enum):
- r"""Indicates the current state of the backup."""
+ r"""Indicates the current state of the backup.
+
+ Values:
+ STATE_UNSPECIFIED (0):
+ Not specified.
+ CREATING (1):
+ The pending backup is still being created. Operations on the
+ backup may fail with ``FAILED_PRECONDITION`` in this state.
+ READY (2):
+ The backup is complete and ready for use.
+ """
STATE_UNSPECIFIED = 0
CREATING = 1
READY = 2
- database = proto.Field(proto.STRING, number=2,)
- version_time = proto.Field(
- proto.MESSAGE, number=9, message=timestamp_pb2.Timestamp,
+ database: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ version_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=9,
+ message=timestamp_pb2.Timestamp,
+ )
+ expire_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=timestamp_pb2.Timestamp,
+ )
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ create_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=timestamp_pb2.Timestamp,
+ )
+ size_bytes: int = proto.Field(
+ proto.INT64,
+ number=5,
+ )
+ freeable_size_bytes: int = proto.Field(
+ proto.INT64,
+ number=15,
+ )
+ exclusive_size_bytes: int = proto.Field(
+ proto.INT64,
+ number=16,
+ )
+ state: State = proto.Field(
+ proto.ENUM,
+ number=6,
+ enum=State,
+ )
+ referencing_databases: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=7,
+ )
+ encryption_info: common.EncryptionInfo = proto.Field(
+ proto.MESSAGE,
+ number=8,
+ message=common.EncryptionInfo,
+ )
+ encryption_information: MutableSequence[
+ common.EncryptionInfo
+ ] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=13,
+ message=common.EncryptionInfo,
+ )
+ database_dialect: common.DatabaseDialect = proto.Field(
+ proto.ENUM,
+ number=10,
+ enum=common.DatabaseDialect,
+ )
+ referencing_backups: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=11,
+ )
+ max_expire_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=12,
+ message=timestamp_pb2.Timestamp,
)
- expire_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
- name = proto.Field(proto.STRING, number=1,)
- create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
- size_bytes = proto.Field(proto.INT64, number=5,)
- state = proto.Field(proto.ENUM, number=6, enum=State,)
- referencing_databases = proto.RepeatedField(proto.STRING, number=7,)
- encryption_info = proto.Field(
- proto.MESSAGE, number=8, message=common.EncryptionInfo,
+ backup_schedules: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=14,
+ )
+ incremental_backup_chain_id: str = proto.Field(
+ proto.STRING,
+ number=17,
+ )
+ oldest_version_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=18,
+ message=timestamp_pb2.Timestamp,
+ )
+ instance_partitions: MutableSequence[
+ "BackupInstancePartition"
+ ] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=19,
+ message="BackupInstancePartition",
)
@@ -154,11 +344,23 @@ class CreateBackupRequest(proto.Message):
= ``USE_DATABASE_ENCRYPTION``.
"""
- parent = proto.Field(proto.STRING, number=1,)
- backup_id = proto.Field(proto.STRING, number=2,)
- backup = proto.Field(proto.MESSAGE, number=3, message="Backup",)
- encryption_config = proto.Field(
- proto.MESSAGE, number=4, message="CreateBackupEncryptionConfig",
+ parent: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ backup_id: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ backup: "Backup" = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message="Backup",
+ )
+ encryption_config: "CreateBackupEncryptionConfig" = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message="CreateBackupEncryptionConfig",
)
@@ -193,10 +395,138 @@ class CreateBackupMetadata(proto.Message):
1, corresponding to ``Code.CANCELLED``.
"""
- name = proto.Field(proto.STRING, number=1,)
- database = proto.Field(proto.STRING, number=2,)
- progress = proto.Field(proto.MESSAGE, number=3, message=common.OperationProgress,)
- cancel_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ database: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ progress: common.OperationProgress = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=common.OperationProgress,
+ )
+ cancel_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=timestamp_pb2.Timestamp,
+ )
+
+
+class CopyBackupRequest(proto.Message):
+ r"""The request for
+ [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
+
+ Attributes:
+ parent (str):
+ Required. The name of the destination instance that will
+ contain the backup copy. Values are of the form:
+ ``projects//instances/``.
+ backup_id (str):
+ Required. The id of the backup copy. The ``backup_id``
+ appended to ``parent`` forms the full backup_uri of the form
+ ``projects//instances//backups/``.
+ source_backup (str):
+ Required. The source backup to be copied. The source backup
+ needs to be in READY state for it to be copied. Once
+ CopyBackup is in progress, the source backup cannot be
+ deleted or cleaned up on expiration until CopyBackup is
+ finished. Values are of the form:
+ ``projects//instances//backups/``.
+ expire_time (google.protobuf.timestamp_pb2.Timestamp):
+ Required. The expiration time of the backup in microsecond
+ granularity. The expiration time must be at least 6 hours
+ and at most 366 days from the ``create_time`` of the source
+ backup. Once the ``expire_time`` has passed, the backup is
+ eligible to be automatically deleted by Cloud Spanner to
+ free the resources used by the backup.
+ encryption_config (google.cloud.spanner_admin_database_v1.types.CopyBackupEncryptionConfig):
+ Optional. The encryption configuration used to encrypt the
+ backup. If this field is not specified, the backup will use
+ the same encryption configuration as the source backup by
+ default, namely
+ [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
+ = ``USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION``.
+ """
+
+ parent: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ backup_id: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ source_backup: str = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+ expire_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=timestamp_pb2.Timestamp,
+ )
+ encryption_config: "CopyBackupEncryptionConfig" = proto.Field(
+ proto.MESSAGE,
+ number=5,
+ message="CopyBackupEncryptionConfig",
+ )
+
+
+class CopyBackupMetadata(proto.Message):
+ r"""Metadata type for the operation returned by
+ [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
+
+ Attributes:
+ name (str):
+ The name of the backup being created through the copy
+ operation. Values are of the form
+ ``projects//instances//backups/``.
+ source_backup (str):
+ The name of the source backup that is being copied. Values
+ are of the form
+ ``projects//instances//backups/``.
+ progress (google.cloud.spanner_admin_database_v1.types.OperationProgress):
+ The progress of the
+ [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
+ operation.
+ cancel_time (google.protobuf.timestamp_pb2.Timestamp):
+ The time at which cancellation of CopyBackup operation was
+ received.
+ [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
+ starts asynchronous cancellation on a long-running
+ operation. The server makes a best effort to cancel the
+ operation, but success is not guaranteed. Clients can use
+ [Operations.GetOperation][google.longrunning.Operations.GetOperation]
+ or other methods to check whether the cancellation succeeded
+ or whether the operation completed despite cancellation. On
+ successful cancellation, the operation is not deleted;
+ instead, it becomes an operation with an
+ [Operation.error][google.longrunning.Operation.error] value
+ with a [google.rpc.Status.code][google.rpc.Status.code] of
+ 1, corresponding to ``Code.CANCELLED``.
+ """
+
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ source_backup: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ progress: common.OperationProgress = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=common.OperationProgress,
+ )
+ cancel_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=timestamp_pb2.Timestamp,
+ )
class UpdateBackupRequest(proto.Message):
@@ -210,7 +540,7 @@ class UpdateBackupRequest(proto.Message):
required. Other fields are ignored. Update is only supported
for the following fields:
- - ``backup.expire_time``.
+ - ``backup.expire_time``.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. A mask specifying which fields (e.g.
``expire_time``) in the Backup resource should be updated.
@@ -220,9 +550,15 @@ class UpdateBackupRequest(proto.Message):
accidentally by clients that do not know about them.
"""
- backup = proto.Field(proto.MESSAGE, number=1, message="Backup",)
- update_mask = proto.Field(
- proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
+ backup: "Backup" = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message="Backup",
+ )
+ update_mask: field_mask_pb2.FieldMask = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=field_mask_pb2.FieldMask,
)
@@ -236,7 +572,10 @@ class GetBackupRequest(proto.Message):
``projects//instances//backups/``.
"""
- name = proto.Field(proto.STRING, number=1,)
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
class DeleteBackupRequest(proto.Message):
@@ -250,7 +589,10 @@ class DeleteBackupRequest(proto.Message):
``projects//instances//backups/``.
"""
- name = proto.Field(proto.STRING, number=1,)
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
class ListBackupsRequest(proto.Message):
@@ -275,16 +617,17 @@ class ListBackupsRequest(proto.Message):
[Backup][google.spanner.admin.database.v1.Backup] are
eligible for filtering:
- - ``name``
- - ``database``
- - ``state``
- - ``create_time`` (and values are of the format
- YYYY-MM-DDTHH:MM:SSZ)
- - ``expire_time`` (and values are of the format
- YYYY-MM-DDTHH:MM:SSZ)
- - ``version_time`` (and values are of the format
- YYYY-MM-DDTHH:MM:SSZ)
- - ``size_bytes``
+ - ``name``
+ - ``database``
+ - ``state``
+ - ``create_time`` (and values are of the format
+ YYYY-MM-DDTHH:MM:SSZ)
+ - ``expire_time`` (and values are of the format
+ YYYY-MM-DDTHH:MM:SSZ)
+ - ``version_time`` (and values are of the format
+ YYYY-MM-DDTHH:MM:SSZ)
+ - ``size_bytes``
+ - ``backup_schedules``
You can combine multiple expressions by enclosing each
expression in parentheses. By default, expressions are
@@ -293,21 +636,23 @@ class ListBackupsRequest(proto.Message):
Here are a few examples:
- - ``name:Howl`` - The backup's name contains the string
- "howl".
- - ``database:prod`` - The database's name contains the
- string "prod".
- - ``state:CREATING`` - The backup is pending creation.
- - ``state:READY`` - The backup is fully created and ready
- for use.
- - ``(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")``
- - The backup name contains the string "howl" and
- ``create_time`` of the backup is before
- 2018-03-28T14:50:00Z.
- - ``expire_time < \"2018-03-28T14:50:00Z\"`` - The backup
- ``expire_time`` is before 2018-03-28T14:50:00Z.
- - ``size_bytes > 10000000000`` - The backup's size is
- greater than 10GB
+ - ``name:Howl`` - The backup's name contains the string
+ "howl".
+ - ``database:prod`` - The database's name contains the
+ string "prod".
+ - ``state:CREATING`` - The backup is pending creation.
+ - ``state:READY`` - The backup is fully created and ready
+ for use.
+ - ``(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")``
+ - The backup name contains the string "howl" and
+ ``create_time`` of the backup is before
+ 2018-03-28T14:50:00Z.
+ - ``expire_time < \"2018-03-28T14:50:00Z\"`` - The backup
+ ``expire_time`` is before 2018-03-28T14:50:00Z.
+ - ``size_bytes > 10000000000`` - The backup's size is
+ greater than 10GB
+ - ``backup_schedules:daily`` - The backup is created from a
+ schedule with "daily" in its name.
page_size (int):
Number of backups to be returned in the
response. If 0 or less, defaults to the server's
@@ -320,10 +665,22 @@ class ListBackupsRequest(proto.Message):
to the same ``parent`` and with the same ``filter``.
"""
- parent = proto.Field(proto.STRING, number=1,)
- filter = proto.Field(proto.STRING, number=2,)
- page_size = proto.Field(proto.INT32, number=3,)
- page_token = proto.Field(proto.STRING, number=4,)
+ parent: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ filter: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ page_size: int = proto.Field(
+ proto.INT32,
+ number=3,
+ )
+ page_token: str = proto.Field(
+ proto.STRING,
+ number=4,
+ )
class ListBackupsResponse(proto.Message):
@@ -331,7 +688,7 @@ class ListBackupsResponse(proto.Message):
[ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
Attributes:
- backups (Sequence[google.cloud.spanner_admin_database_v1.types.Backup]):
+ backups (MutableSequence[google.cloud.spanner_admin_database_v1.types.Backup]):
The list of matching backups. Backups returned are ordered
by ``create_time`` in descending order, starting from the
most recent ``create_time``.
@@ -345,8 +702,15 @@ class ListBackupsResponse(proto.Message):
def raw_page(self):
return self
- backups = proto.RepeatedField(proto.MESSAGE, number=1, message="Backup",)
- next_page_token = proto.Field(proto.STRING, number=2,)
+ backups: MutableSequence["Backup"] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=1,
+ message="Backup",
+ )
+ next_page_token: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
class ListBackupOperationsRequest(proto.Message):
@@ -372,19 +736,21 @@ class ListBackupOperationsRequest(proto.Message):
[operation][google.longrunning.Operation] are eligible for
filtering:
- - ``name`` - The name of the long-running operation
- - ``done`` - False if the operation is in progress, else
- true.
- - ``metadata.@type`` - the type of metadata. For example,
- the type string for
- [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
- is
- ``type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata``.
- - ``metadata.`` - any field in metadata.value.
- - ``error`` - Error associated with the long-running
- operation.
- - ``response.@type`` - the type of response.
- - ``response.`` - any field in response.value.
+ - ``name`` - The name of the long-running operation
+ - ``done`` - False if the operation is in progress, else
+ true.
+ - ``metadata.@type`` - the type of metadata. For example,
+ the type string for
+ [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
+ is
+ ``type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata``.
+ - ``metadata.`` - any field in metadata.value.
+ ``metadata.@type`` must be specified first if filtering on
+ metadata fields.
+ - ``error`` - Error associated with the long-running
+ operation.
+ - ``response.@type`` - the type of response.
+ - ``response.`` - any field in response.value.
You can combine multiple expressions by enclosing each
expression in parentheses. By default, expressions are
@@ -393,19 +759,55 @@ class ListBackupOperationsRequest(proto.Message):
Here are a few examples:
- - ``done:true`` - The operation is complete.
- - ``metadata.database:prod`` - The database the backup was
- taken from has a name containing the string "prod".
- - ``(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND``
- ``(metadata.name:howl) AND``
- ``(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND``
- ``(error:*)`` - Returns operations where:
-
- - The operation's metadata type is
- [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
- - The backup name contains the string "howl".
- - The operation started before 2018-03-28T14:50:00Z.
- - The operation resulted in an error.
+ - ``done:true`` - The operation is complete.
+ - ``(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND``
+ ``metadata.database:prod`` - Returns operations where:
+
+ - The operation's metadata type is
+ [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
+ - The source database name of backup contains the string
+ "prod".
+
+ - ``(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND``
+ ``(metadata.name:howl) AND``
+ ``(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND``
+ ``(error:*)`` - Returns operations where:
+
+ - The operation's metadata type is
+ [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
+ - The backup name contains the string "howl".
+ - The operation started before 2018-03-28T14:50:00Z.
+ - The operation resulted in an error.
+
+ - ``(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata) AND``
+ ``(metadata.source_backup:test) AND``
+ ``(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND``
+ ``(error:*)`` - Returns operations where:
+
+ - The operation's metadata type is
+ [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
+ - The source backup name contains the string "test".
+ - The operation started before 2022-01-18T14:50:00Z.
+ - The operation resulted in an error.
+
+ - ``((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND``
+ ``(metadata.database:test_db)) OR``
+ ``((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata) AND``
+ ``(metadata.source_backup:test_bkp)) AND``
+ ``(error:*)`` - Returns operations where:
+
+ - The operation's metadata matches either of criteria:
+
+ - The operation's metadata type is
+ [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
+ AND the source database name of the backup contains
+ the string "test_db"
+ - The operation's metadata type is
+ [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]
+ AND the source backup name contains the string
+ "test_bkp"
+
+ - The operation resulted in an error.
page_size (int):
Number of operations to be returned in the
response. If 0 or less, defaults to the server's
@@ -418,10 +820,22 @@ class ListBackupOperationsRequest(proto.Message):
to the same ``parent`` and with the same ``filter``.
"""
- parent = proto.Field(proto.STRING, number=1,)
- filter = proto.Field(proto.STRING, number=2,)
- page_size = proto.Field(proto.INT32, number=3,)
- page_token = proto.Field(proto.STRING, number=4,)
+ parent: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ filter: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ page_size: int = proto.Field(
+ proto.INT32,
+ number=3,
+ )
+ page_token: str = proto.Field(
+ proto.STRING,
+ number=4,
+ )
class ListBackupOperationsResponse(proto.Message):
@@ -429,14 +843,12 @@ class ListBackupOperationsResponse(proto.Message):
[ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
Attributes:
- operations (Sequence[google.longrunning.operations_pb2.Operation]):
+ operations (MutableSequence[google.longrunning.operations_pb2.Operation]):
The list of matching backup [long-running
operations][google.longrunning.Operation]. Each operation's
- name will be prefixed by the backup's name and the
- operation's
- [metadata][google.longrunning.Operation.metadata] will be of
- type
- [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
+ name will be prefixed by the backup's name. The operation's
+ [metadata][google.longrunning.Operation.metadata] field type
+ ``metadata.type_url`` describes the type of the metadata.
Operations returned include those that are pending or have
completed/failed/canceled within the last 7 days. Operations
returned are ordered by
@@ -453,14 +865,20 @@ class ListBackupOperationsResponse(proto.Message):
def raw_page(self):
return self
- operations = proto.RepeatedField(
- proto.MESSAGE, number=1, message=operations_pb2.Operation,
+ operations: MutableSequence[operations_pb2.Operation] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=1,
+ message=operations_pb2.Operation,
+ )
+ next_page_token: str = proto.Field(
+ proto.STRING,
+ number=2,
)
- next_page_token = proto.Field(proto.STRING, number=2,)
class BackupInfo(proto.Message):
r"""Information about a backup.
+
Attributes:
backup (str):
Name of the backup.
@@ -481,16 +899,29 @@ class BackupInfo(proto.Message):
from.
"""
- backup = proto.Field(proto.STRING, number=1,)
- version_time = proto.Field(
- proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,
+ backup: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ version_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=timestamp_pb2.Timestamp,
+ )
+ create_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=timestamp_pb2.Timestamp,
+ )
+ source_database: str = proto.Field(
+ proto.STRING,
+ number=3,
)
- create_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
- source_database = proto.Field(proto.STRING, number=3,)
class CreateBackupEncryptionConfig(proto.Message):
r"""Encryption configuration for the backup to create.
+
Attributes:
encryption_type (google.cloud.spanner_admin_database_v1.types.CreateBackupEncryptionConfig.EncryptionType):
Required. The encryption type of the backup.
@@ -500,17 +931,174 @@ class CreateBackupEncryptionConfig(proto.Message):
[encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
is ``CUSTOMER_MANAGED_ENCRYPTION``. Values are of the form
``projects//locations//keyRings//cryptoKeys/``.
+ kms_key_names (MutableSequence[str]):
+ Optional. Specifies the KMS configuration for the one or
+ more keys used to protect the backup. Values are of the form
+ ``projects//locations//keyRings//cryptoKeys/``.
+
+ The keys referenced by kms_key_names must fully cover all
+ regions of the backup's instance configuration. Some
+ examples:
+
+ - For single region instance configs, specify a single
+ regional location KMS key.
+ - For multi-regional instance configs of type
+ GOOGLE_MANAGED, either specify a multi-regional location
+ KMS key or multiple regional location KMS keys that cover
+ all regions in the instance config.
+ - For an instance config of type USER_MANAGED, please
+ specify only regional location KMS keys to cover each
+ region in the instance config. Multi-regional location KMS
+ keys are not supported for USER_MANAGED instance configs.
"""
class EncryptionType(proto.Enum):
- r"""Encryption types for the backup."""
+ r"""Encryption types for the backup.
+
+ Values:
+ ENCRYPTION_TYPE_UNSPECIFIED (0):
+ Unspecified. Do not use.
+ USE_DATABASE_ENCRYPTION (1):
+ Use the same encryption configuration as the database. This
+ is the default option when
+ [encryption_config][google.spanner.admin.database.v1.CreateBackupEncryptionConfig]
+ is empty. For example, if the database is using
+ ``Customer_Managed_Encryption``, the backup will be using
+ the same Cloud KMS key as the database.
+ GOOGLE_DEFAULT_ENCRYPTION (2):
+ Use Google default encryption.
+ CUSTOMER_MANAGED_ENCRYPTION (3):
+ Use customer managed encryption. If specified,
+ ``kms_key_name`` must contain a valid Cloud KMS key.
+ """
ENCRYPTION_TYPE_UNSPECIFIED = 0
USE_DATABASE_ENCRYPTION = 1
GOOGLE_DEFAULT_ENCRYPTION = 2
CUSTOMER_MANAGED_ENCRYPTION = 3
- encryption_type = proto.Field(proto.ENUM, number=1, enum=EncryptionType,)
- kms_key_name = proto.Field(proto.STRING, number=2,)
+ encryption_type: EncryptionType = proto.Field(
+ proto.ENUM,
+ number=1,
+ enum=EncryptionType,
+ )
+ kms_key_name: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ kms_key_names: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=3,
+ )
+
+
+class CopyBackupEncryptionConfig(proto.Message):
+ r"""Encryption configuration for the copied backup.
+
+ Attributes:
+ encryption_type (google.cloud.spanner_admin_database_v1.types.CopyBackupEncryptionConfig.EncryptionType):
+ Required. The encryption type of the backup.
+ kms_key_name (str):
+ Optional. The Cloud KMS key that will be used to protect the
+ backup. This field should be set only when
+ [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
+ is ``CUSTOMER_MANAGED_ENCRYPTION``. Values are of the form
+ ``projects//locations//keyRings//cryptoKeys/``.
+ kms_key_names (MutableSequence[str]):
+ Optional. Specifies the KMS configuration for the one or
+ more keys used to protect the backup. Values are of the form
+ ``projects//locations//keyRings//cryptoKeys/``.
+ Kms keys specified can be in any order.
+
+ The keys referenced by kms_key_names must fully cover all
+ regions of the backup's instance configuration. Some
+ examples:
+
+ - For single region instance configs, specify a single
+ regional location KMS key.
+ - For multi-regional instance configs of type
+ GOOGLE_MANAGED, either specify a multi-regional location
+ KMS key or multiple regional location KMS keys that cover
+ all regions in the instance config.
+ - For an instance config of type USER_MANAGED, please
+ specify only regional location KMS keys to cover each
+ region in the instance config. Multi-regional location KMS
+ keys are not supported for USER_MANAGED instance configs.
+ """
+
+ class EncryptionType(proto.Enum):
+ r"""Encryption types for the backup.
+
+ Values:
+ ENCRYPTION_TYPE_UNSPECIFIED (0):
+ Unspecified. Do not use.
+ USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION (1):
+ This is the default option for
+ [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
+ when
+ [encryption_config][google.spanner.admin.database.v1.CopyBackupEncryptionConfig]
+ is not specified. For example, if the source backup is using
+ ``Customer_Managed_Encryption``, the backup will be using
+ the same Cloud KMS key as the source backup.
+ GOOGLE_DEFAULT_ENCRYPTION (2):
+ Use Google default encryption.
+ CUSTOMER_MANAGED_ENCRYPTION (3):
+ Use customer managed encryption. If specified, either
+ ``kms_key_name`` or ``kms_key_names`` must contain valid
+ Cloud KMS key(s).
+ """
+ ENCRYPTION_TYPE_UNSPECIFIED = 0
+ USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION = 1
+ GOOGLE_DEFAULT_ENCRYPTION = 2
+ CUSTOMER_MANAGED_ENCRYPTION = 3
+
+ encryption_type: EncryptionType = proto.Field(
+ proto.ENUM,
+ number=1,
+ enum=EncryptionType,
+ )
+ kms_key_name: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ kms_key_names: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=3,
+ )
+
+
+class FullBackupSpec(proto.Message):
+ r"""The specification for full backups.
+ A full backup stores the entire contents of the database at a
+ given version time.
+
+ """
+
+
+class IncrementalBackupSpec(proto.Message):
+ r"""The specification for incremental backup chains.
+ An incremental backup stores the delta of changes between a
+ previous backup and the database contents at a given version
+ time. An incremental backup chain consists of a full backup and
+ zero or more successive incremental backups. The first backup
+ created for an incremental backup chain is always a full backup.
+
+ """
+
+
+class BackupInstancePartition(proto.Message):
+ r"""Instance partition information for the backup.
+
+ Attributes:
+ instance_partition (str):
+ A unique identifier for the instance partition. Values are
+ of the form
+ ``projects//instances//instancePartitions/``
+ """
+
+ instance_partition: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/spanner_admin_database_v1/types/backup_schedule.py b/google/cloud/spanner_admin_database_v1/types/backup_schedule.py
new file mode 100644
index 0000000000..2773c1ef63
--- /dev/null
+++ b/google/cloud/spanner_admin_database_v1/types/backup_schedule.py
@@ -0,0 +1,369 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+from typing import MutableMapping, MutableSequence
+
+import proto # type: ignore
+
+from google.cloud.spanner_admin_database_v1.types import backup
+from google.protobuf import duration_pb2 # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.spanner.admin.database.v1",
+ manifest={
+ "BackupScheduleSpec",
+ "BackupSchedule",
+ "CrontabSpec",
+ "CreateBackupScheduleRequest",
+ "GetBackupScheduleRequest",
+ "DeleteBackupScheduleRequest",
+ "ListBackupSchedulesRequest",
+ "ListBackupSchedulesResponse",
+ "UpdateBackupScheduleRequest",
+ },
+)
+
+
+class BackupScheduleSpec(proto.Message):
+ r"""Defines specifications of the backup schedule.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ cron_spec (google.cloud.spanner_admin_database_v1.types.CrontabSpec):
+ Cron style schedule specification.
+
+ This field is a member of `oneof`_ ``schedule_spec``.
+ """
+
+ cron_spec: "CrontabSpec" = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ oneof="schedule_spec",
+ message="CrontabSpec",
+ )
+
+
+class BackupSchedule(proto.Message):
+ r"""BackupSchedule expresses the automated backup creation
+ specification for a Spanner database.
+ Next ID: 10
+
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ name (str):
+ Identifier. Output only for the
+ [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchedule]
+ operation. Required for the
+ [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
+ operation. A globally unique identifier for the backup
+ schedule which cannot be changed. Values are of the form
+ ``projects//instances//databases//backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]``
+ The final segment of the name must be between 2 and 60
+ characters in length.
+ spec (google.cloud.spanner_admin_database_v1.types.BackupScheduleSpec):
+ Optional. The schedule specification based on
+ which the backup creations are triggered.
+ retention_duration (google.protobuf.duration_pb2.Duration):
+ Optional. The retention duration of a backup
+ that must be at least 6 hours and at most 366
+ days. The backup is eligible to be automatically
+ deleted once the retention period has elapsed.
+ encryption_config (google.cloud.spanner_admin_database_v1.types.CreateBackupEncryptionConfig):
+ Optional. The encryption configuration that
+ will be used to encrypt the backup. If this
+ field is not specified, the backup will use the
+ same encryption configuration as the database.
+ full_backup_spec (google.cloud.spanner_admin_database_v1.types.FullBackupSpec):
+ The schedule creates only full backups.
+
+ This field is a member of `oneof`_ ``backup_type_spec``.
+ incremental_backup_spec (google.cloud.spanner_admin_database_v1.types.IncrementalBackupSpec):
+ The schedule creates incremental backup
+ chains.
+
+ This field is a member of `oneof`_ ``backup_type_spec``.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. The timestamp at which the
+ schedule was last updated. If the schedule has
+ never been updated, this field contains the
+ timestamp when the schedule was first created.
+ """
+
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ spec: "BackupScheduleSpec" = proto.Field(
+ proto.MESSAGE,
+ number=6,
+ message="BackupScheduleSpec",
+ )
+ retention_duration: duration_pb2.Duration = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=duration_pb2.Duration,
+ )
+ encryption_config: backup.CreateBackupEncryptionConfig = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=backup.CreateBackupEncryptionConfig,
+ )
+ full_backup_spec: backup.FullBackupSpec = proto.Field(
+ proto.MESSAGE,
+ number=7,
+ oneof="backup_type_spec",
+ message=backup.FullBackupSpec,
+ )
+ incremental_backup_spec: backup.IncrementalBackupSpec = proto.Field(
+ proto.MESSAGE,
+ number=8,
+ oneof="backup_type_spec",
+ message=backup.IncrementalBackupSpec,
+ )
+ update_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=9,
+ message=timestamp_pb2.Timestamp,
+ )
+
+
+class CrontabSpec(proto.Message):
+ r"""CrontabSpec can be used to specify the version time and
+ frequency at which the backup should be created.
+
+ Attributes:
+ text (str):
+ Required. Textual representation of the crontab. User can
+ customize the backup frequency and the backup version time
+ using the cron expression. The version time must be in UTC
+ timezone.
+
+ The backup will contain an externally consistent copy of the
+ database at the version time. Allowed frequencies are 12
+ hour, 1 day, 1 week and 1 month. Examples of valid cron
+ specifications:
+
+ - ``0 2/12 * * *`` : every 12 hours at (2, 14) hours past
+ midnight in UTC.
+ - ``0 2,14 * * *`` : every 12 hours at (2,14) hours past
+ midnight in UTC.
+ - ``0 2 * * *`` : once a day at 2 past midnight in UTC.
+ - ``0 2 * * 0`` : once a week every Sunday at 2 past
+ midnight in UTC.
+ - ``0 2 8 * *`` : once a month on 8th day at 2 past midnight
+ in UTC.
+ time_zone (str):
+ Output only. The time zone of the times in
+ ``CrontabSpec.text``. Currently only UTC is supported.
+ creation_window (google.protobuf.duration_pb2.Duration):
+ Output only. Schedule backups will contain an externally
+ consistent copy of the database at the version time
+ specified in ``schedule_spec.cron_spec``. However, Spanner
+ may not initiate the creation of the scheduled backups at
+ that version time. Spanner will initiate the creation of
+ scheduled backups within the time window bounded by the
+ version_time specified in ``schedule_spec.cron_spec`` and
+ version_time + ``creation_window``.
+ """
+
+ text: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ time_zone: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ creation_window: duration_pb2.Duration = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=duration_pb2.Duration,
+ )
+
+
+class CreateBackupScheduleRequest(proto.Message):
+ r"""The request for
+ [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule].
+
+ Attributes:
+ parent (str):
+ Required. The name of the database that this
+ backup schedule applies to.
+ backup_schedule_id (str):
+ Required. The Id to use for the backup schedule. The
+ ``backup_schedule_id`` appended to ``parent`` forms the full
+ backup schedule name of the form
+ ``projects//instances//databases//backupSchedules/``.
+ backup_schedule (google.cloud.spanner_admin_database_v1.types.BackupSchedule):
+ Required. The backup schedule to create.
+ """
+
+ parent: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ backup_schedule_id: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ backup_schedule: "BackupSchedule" = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message="BackupSchedule",
+ )
+
+
+class GetBackupScheduleRequest(proto.Message):
+ r"""The request for
+ [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule].
+
+ Attributes:
+ name (str):
+ Required. The name of the schedule to retrieve. Values are
+ of the form
+ ``projects//instances//databases//backupSchedules/``.
+ """
+
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+
+
+class DeleteBackupScheduleRequest(proto.Message):
+ r"""The request for
+ [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule].
+
+ Attributes:
+ name (str):
+ Required. The name of the schedule to delete. Values are of
+ the form
+ ``projects//instances//databases//backupSchedules/``.
+ """
+
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+
+
+class ListBackupSchedulesRequest(proto.Message):
+ r"""The request for
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+
+ Attributes:
+ parent (str):
+ Required. Database is the parent resource
+ whose backup schedules should be listed. Values
+ are of the form
+ projects//instances//databases/
+ page_size (int):
+ Optional. Number of backup schedules to be
+ returned in the response. If 0 or less, defaults
+ to the server's maximum allowed page size.
+ page_token (str):
+ Optional. If non-empty, ``page_token`` should contain a
+ [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
+ from a previous
+ [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
+ to the same ``parent``.
+ """
+
+ parent: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ page_size: int = proto.Field(
+ proto.INT32,
+ number=2,
+ )
+ page_token: str = proto.Field(
+ proto.STRING,
+ number=4,
+ )
+
+
+class ListBackupSchedulesResponse(proto.Message):
+ r"""The response for
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+
+ Attributes:
+ backup_schedules (MutableSequence[google.cloud.spanner_admin_database_v1.types.BackupSchedule]):
+ The list of backup schedules for a database.
+ next_page_token (str):
+ ``next_page_token`` can be sent in a subsequent
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
+ call to fetch more of the schedules.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ backup_schedules: MutableSequence["BackupSchedule"] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=1,
+ message="BackupSchedule",
+ )
+ next_page_token: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+
+
+class UpdateBackupScheduleRequest(proto.Message):
+ r"""The request for
+ [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule].
+
+ Attributes:
+ backup_schedule (google.cloud.spanner_admin_database_v1.types.BackupSchedule):
+ Required. The backup schedule to update.
+ ``backup_schedule.name``, and the fields to be updated as
+ specified by ``update_mask`` are required. Other fields are
+ ignored.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. A mask specifying which fields in
+ the BackupSchedule resource should be updated.
+ This mask is relative to the BackupSchedule
+ resource, not to the request message. The field
+ mask must always be specified; this prevents any
+ future fields from being erased accidentally.
+ """
+
+ backup_schedule: "BackupSchedule" = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message="BackupSchedule",
+ )
+ update_mask: field_mask_pb2.FieldMask = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=field_mask_pb2.FieldMask,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/spanner_admin_database_v1/types/common.py b/google/cloud/spanner_admin_database_v1/types/common.py
index 38020dcd4e..fff1a8756c 100644
--- a/google/cloud/spanner_admin_database_v1/types/common.py
+++ b/google/cloud/spanner_admin_database_v1/types/common.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,6 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from __future__ import annotations
+
+from typing import MutableMapping, MutableSequence
+
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
@@ -21,10 +25,32 @@
__protobuf__ = proto.module(
package="google.spanner.admin.database.v1",
- manifest={"OperationProgress", "EncryptionConfig", "EncryptionInfo",},
+ manifest={
+ "DatabaseDialect",
+ "OperationProgress",
+ "EncryptionConfig",
+ "EncryptionInfo",
+ },
)
+class DatabaseDialect(proto.Enum):
+ r"""Indicates the dialect type of a database.
+
+ Values:
+ DATABASE_DIALECT_UNSPECIFIED (0):
+ Default value. This value will create a database with the
+ GOOGLE_STANDARD_SQL dialect.
+ GOOGLE_STANDARD_SQL (1):
+ GoogleSQL supported SQL.
+ POSTGRESQL (2):
+ PostgreSQL supported SQL.
+ """
+ DATABASE_DIALECT_UNSPECIFIED = 0
+ GOOGLE_STANDARD_SQL = 1
+ POSTGRESQL = 2
+
+
class OperationProgress(proto.Message):
r"""Encapsulates progress related information for a Cloud Spanner
long running operation.
@@ -40,21 +66,60 @@ class OperationProgress(proto.Message):
failed or was completed successfully.
"""
- progress_percent = proto.Field(proto.INT32, number=1,)
- start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
- end_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
+ progress_percent: int = proto.Field(
+ proto.INT32,
+ number=1,
+ )
+ start_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=timestamp_pb2.Timestamp,
+ )
+ end_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=timestamp_pb2.Timestamp,
+ )
class EncryptionConfig(proto.Message):
r"""Encryption configuration for a Cloud Spanner database.
+
Attributes:
kms_key_name (str):
The Cloud KMS key to be used for encrypting and decrypting
the database. Values are of the form
``projects//locations//keyRings//cryptoKeys/``.
+ kms_key_names (MutableSequence[str]):
+ Specifies the KMS configuration for the one or more keys
+ used to encrypt the database. Values are of the form
+ ``projects//locations//keyRings//cryptoKeys/``.
+
+ The keys referenced by kms_key_names must fully cover all
+ regions of the database instance configuration. Some
+ examples:
+
+ - For single region database instance configs, specify a
+ single regional location KMS key.
+ - For multi-regional database instance configs of type
+ GOOGLE_MANAGED, either specify a multi-regional location
+ KMS key or multiple regional location KMS keys that cover
+ all regions in the instance config.
+ - For a database instance config of type USER_MANAGED,
+ please specify only regional location KMS keys to cover
+ each region in the instance config. Multi-regional
+ location KMS keys are not supported for USER_MANAGED
+ instance configs.
"""
- kms_key_name = proto.Field(proto.STRING, number=2,)
+ kms_key_name: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ kms_key_names: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=3,
+ )
class EncryptionInfo(proto.Message):
@@ -75,14 +140,41 @@ class EncryptionInfo(proto.Message):
"""
class Type(proto.Enum):
- r"""Possible encryption types."""
+ r"""Possible encryption types.
+
+ Values:
+ TYPE_UNSPECIFIED (0):
+ Encryption type was not specified, though
+ data at rest remains encrypted.
+ GOOGLE_DEFAULT_ENCRYPTION (1):
+ The data is encrypted at rest with a key that
+ is fully managed by Google. No key version or
+ status will be populated. This is the default
+ state.
+ CUSTOMER_MANAGED_ENCRYPTION (2):
+ The data is encrypted at rest with a key that is managed by
+ the customer. The active version of the key,
+ ``kms_key_version``, will be populated, and
+ ``encryption_status`` may be populated.
+ """
TYPE_UNSPECIFIED = 0
GOOGLE_DEFAULT_ENCRYPTION = 1
CUSTOMER_MANAGED_ENCRYPTION = 2
- encryption_type = proto.Field(proto.ENUM, number=3, enum=Type,)
- encryption_status = proto.Field(proto.MESSAGE, number=4, message=status_pb2.Status,)
- kms_key_version = proto.Field(proto.STRING, number=2,)
+ encryption_type: Type = proto.Field(
+ proto.ENUM,
+ number=3,
+ enum=Type,
+ )
+ encryption_status: status_pb2.Status = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=status_pb2.Status,
+ )
+ kms_key_version: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py
index e7aee2ac1e..c82fdc87df 100644
--- a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py
+++ b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,12 +13,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from __future__ import annotations
+
+from typing import MutableMapping, MutableSequence
+
import proto # type: ignore
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
from google.cloud.spanner_admin_database_v1.types import common
from google.longrunning import operations_pb2 # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
+from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
@@ -32,7 +39,10 @@
"CreateDatabaseRequest",
"CreateDatabaseMetadata",
"GetDatabaseRequest",
+ "UpdateDatabaseRequest",
+ "UpdateDatabaseMetadata",
"UpdateDatabaseDdlRequest",
+ "DdlStatementActionInfo",
"UpdateDatabaseDdlMetadata",
"DropDatabaseRequest",
"GetDatabaseDdlRequest",
@@ -43,34 +53,63 @@
"RestoreDatabaseEncryptionConfig",
"RestoreDatabaseMetadata",
"OptimizeRestoredDatabaseMetadata",
+ "DatabaseRole",
+ "ListDatabaseRolesRequest",
+ "ListDatabaseRolesResponse",
+ "AddSplitPointsRequest",
+ "AddSplitPointsResponse",
+ "SplitPoints",
+ "InternalUpdateGraphOperationRequest",
+ "InternalUpdateGraphOperationResponse",
},
)
class RestoreSourceType(proto.Enum):
- r"""Indicates the type of the restore source."""
+ r"""Indicates the type of the restore source.
+
+ Values:
+ TYPE_UNSPECIFIED (0):
+ No restore associated.
+ BACKUP (1):
+ A backup was used as the source of the
+ restore.
+ """
TYPE_UNSPECIFIED = 0
BACKUP = 1
class RestoreInfo(proto.Message):
r"""Information about the database restore.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
Attributes:
source_type (google.cloud.spanner_admin_database_v1.types.RestoreSourceType):
The type of the restore source.
backup_info (google.cloud.spanner_admin_database_v1.types.BackupInfo):
Information about the backup used to restore
the database. The backup may no longer exist.
+
+ This field is a member of `oneof`_ ``source_info``.
"""
- source_type = proto.Field(proto.ENUM, number=1, enum="RestoreSourceType",)
- backup_info = proto.Field(
- proto.MESSAGE, number=2, oneof="source_info", message=gsad_backup.BackupInfo,
+ source_type: "RestoreSourceType" = proto.Field(
+ proto.ENUM,
+ number=1,
+ enum="RestoreSourceType",
+ )
+ backup_info: gsad_backup.BackupInfo = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ oneof="source_info",
+ message=gsad_backup.BackupInfo,
)
class Database(proto.Message):
r"""A Cloud Spanner database.
+
Attributes:
name (str):
Required. The name of the database. Values are of the form
@@ -93,19 +132,20 @@ class Database(proto.Message):
the encryption configuration for the database.
For databases that are using Google default or
other types of encryption, this field is empty.
- encryption_info (Sequence[google.cloud.spanner_admin_database_v1.types.EncryptionInfo]):
- Output only. For databases that are using
- customer managed encryption, this field contains
- the encryption information for the database,
- such as encryption state and the Cloud KMS key
- versions that are in use.
- For databases that are using Google default or
- other types of encryption, this field is empty.
-
- This field is propagated lazily from the
- backend. There might be a delay from when a key
- version is being used and when it appears in
- this field.
+ encryption_info (MutableSequence[google.cloud.spanner_admin_database_v1.types.EncryptionInfo]):
+ Output only. For databases that are using customer managed
+ encryption, this field contains the encryption information
+ for the database, such as all Cloud KMS key versions that
+ are in use. The
+ ``encryption_status`` field inside of each ``EncryptionInfo``
+ is not populated.
+
+ For databases that are using Google default or other types
+ of encryption, this field is empty.
+
+ This field is propagated lazily from the backend. There
+ might be a delay from when a key version is being used and
+ when it appears in this field.
version_retention_period (str):
Output only. The period in which Cloud Spanner retains all
versions of data for the database. This is the same as the
@@ -129,30 +169,104 @@ class Database(proto.Message):
option set using DatabaseAdmin.CreateDatabase or
DatabaseAdmin.UpdateDatabaseDdl. If not explicitly set, this
is empty.
+ database_dialect (google.cloud.spanner_admin_database_v1.types.DatabaseDialect):
+ Output only. The dialect of the Cloud Spanner
+ Database.
+ enable_drop_protection (bool):
+ Whether drop protection is enabled for this database.
+ Defaults to false, if not set. For more details, please see
+ how to `prevent accidental database
+ deletion `__.
+ reconciling (bool):
+ Output only. If true, the database is being
+ updated. If false, there are no ongoing update
+ operations for the database.
"""
class State(proto.Enum):
- r"""Indicates the current state of the database."""
+ r"""Indicates the current state of the database.
+
+ Values:
+ STATE_UNSPECIFIED (0):
+ Not specified.
+ CREATING (1):
+ The database is still being created. Operations on the
+ database may fail with ``FAILED_PRECONDITION`` in this
+ state.
+ READY (2):
+ The database is fully created and ready for
+ use.
+ READY_OPTIMIZING (3):
+ The database is fully created and ready for use, but is
+ still being optimized for performance and cannot handle full
+ load.
+
+ In this state, the database still references the backup it
+ was restored from, preventing the backup from being deleted.
+ When optimizations are complete, the full performance of the
+ database will be restored, and the database will transition
+ to ``READY`` state.
+ """
STATE_UNSPECIFIED = 0
CREATING = 1
READY = 2
READY_OPTIMIZING = 3
- name = proto.Field(proto.STRING, number=1,)
- state = proto.Field(proto.ENUM, number=2, enum=State,)
- create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
- restore_info = proto.Field(proto.MESSAGE, number=4, message="RestoreInfo",)
- encryption_config = proto.Field(
- proto.MESSAGE, number=5, message=common.EncryptionConfig,
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
)
- encryption_info = proto.RepeatedField(
- proto.MESSAGE, number=8, message=common.EncryptionInfo,
+ state: State = proto.Field(
+ proto.ENUM,
+ number=2,
+ enum=State,
)
- version_retention_period = proto.Field(proto.STRING, number=6,)
- earliest_version_time = proto.Field(
- proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,
+ create_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=timestamp_pb2.Timestamp,
+ )
+ restore_info: "RestoreInfo" = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message="RestoreInfo",
+ )
+ encryption_config: common.EncryptionConfig = proto.Field(
+ proto.MESSAGE,
+ number=5,
+ message=common.EncryptionConfig,
+ )
+ encryption_info: MutableSequence[common.EncryptionInfo] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=8,
+ message=common.EncryptionInfo,
+ )
+ version_retention_period: str = proto.Field(
+ proto.STRING,
+ number=6,
+ )
+ earliest_version_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=7,
+ message=timestamp_pb2.Timestamp,
+ )
+ default_leader: str = proto.Field(
+ proto.STRING,
+ number=9,
+ )
+ database_dialect: common.DatabaseDialect = proto.Field(
+ proto.ENUM,
+ number=10,
+ enum=common.DatabaseDialect,
+ )
+ enable_drop_protection: bool = proto.Field(
+ proto.BOOL,
+ number=11,
+ )
+ reconciling: bool = proto.Field(
+ proto.BOOL,
+ number=12,
)
- default_leader = proto.Field(proto.STRING, number=9,)
class ListDatabasesRequest(proto.Message):
@@ -175,9 +289,18 @@ class ListDatabasesRequest(proto.Message):
[ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
"""
- parent = proto.Field(proto.STRING, number=1,)
- page_size = proto.Field(proto.INT32, number=3,)
- page_token = proto.Field(proto.STRING, number=4,)
+ parent: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ page_size: int = proto.Field(
+ proto.INT32,
+ number=3,
+ )
+ page_token: str = proto.Field(
+ proto.STRING,
+ number=4,
+ )
class ListDatabasesResponse(proto.Message):
@@ -185,7 +308,7 @@ class ListDatabasesResponse(proto.Message):
[ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
Attributes:
- databases (Sequence[google.cloud.spanner_admin_database_v1.types.Database]):
+ databases (MutableSequence[google.cloud.spanner_admin_database_v1.types.Database]):
Databases that matched the request.
next_page_token (str):
``next_page_token`` can be sent in a subsequent
@@ -197,8 +320,15 @@ class ListDatabasesResponse(proto.Message):
def raw_page(self):
return self
- databases = proto.RepeatedField(proto.MESSAGE, number=1, message="Database",)
- next_page_token = proto.Field(proto.STRING, number=2,)
+ databases: MutableSequence["Database"] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=1,
+ message="Database",
+ )
+ next_page_token: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
class CreateDatabaseRequest(proto.Message):
@@ -217,25 +347,70 @@ class CreateDatabaseRequest(proto.Message):
between 2 and 30 characters in length. If the database ID is
a reserved word or if it contains a hyphen, the database ID
must be enclosed in backticks (:literal:`\``).
- extra_statements (Sequence[str]):
+ extra_statements (MutableSequence[str]):
Optional. A list of DDL statements to run
inside the newly created database. Statements
can create tables, indexes, etc. These
statements execute atomically with the creation
- of the database: if there is an error in any
- statement, the database is not created.
+ of the database:
+
+ if there is an error in any statement, the
+ database is not created.
encryption_config (google.cloud.spanner_admin_database_v1.types.EncryptionConfig):
Optional. The encryption configuration for
the database. If this field is not specified,
Cloud Spanner will encrypt/decrypt all data at
rest using Google default encryption.
+ database_dialect (google.cloud.spanner_admin_database_v1.types.DatabaseDialect):
+ Optional. The dialect of the Cloud Spanner
+ Database.
+ proto_descriptors (bytes):
+ Optional. Proto descriptors used by CREATE/ALTER PROTO
+ BUNDLE statements in 'extra_statements' above. Contains a
+ protobuf-serialized
+ `google.protobuf.FileDescriptorSet `__.
+ To generate it,
+ `install `__ and
+ run ``protoc`` with --include_imports and
+ --descriptor_set_out. For example, to generate for
+ moon/shot/app.proto, run
+
+ ::
+
+ $protoc --proto_path=/app_path --proto_path=/lib_path \
+ --include_imports \
+ --descriptor_set_out=descriptors.data \
+ moon/shot/app.proto
+
+ For more details, see protobuffer `self
+ description `__.
"""
- parent = proto.Field(proto.STRING, number=1,)
- create_statement = proto.Field(proto.STRING, number=2,)
- extra_statements = proto.RepeatedField(proto.STRING, number=3,)
- encryption_config = proto.Field(
- proto.MESSAGE, number=4, message=common.EncryptionConfig,
+ parent: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ create_statement: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ extra_statements: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=3,
+ )
+ encryption_config: common.EncryptionConfig = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=common.EncryptionConfig,
+ )
+ database_dialect: common.DatabaseDialect = proto.Field(
+ proto.ENUM,
+ number=5,
+ enum=common.DatabaseDialect,
+ )
+ proto_descriptors: bytes = proto.Field(
+ proto.BYTES,
+ number=6,
)
@@ -248,7 +423,10 @@ class CreateDatabaseMetadata(proto.Message):
The database being created.
"""
- database = proto.Field(proto.STRING, number=1,)
+ database: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
class GetDatabaseRequest(proto.Message):
@@ -262,7 +440,72 @@ class GetDatabaseRequest(proto.Message):
``projects//instances//databases/``.
"""
- name = proto.Field(proto.STRING, number=1,)
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+
+
+class UpdateDatabaseRequest(proto.Message):
+ r"""The request for
+ [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
+
+ Attributes:
+ database (google.cloud.spanner_admin_database_v1.types.Database):
+ Required. The database to update. The ``name`` field of the
+ database is of the form
+ ``projects//instances//databases/``.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. The list of fields to update. Currently, only
+ ``enable_drop_protection`` field can be updated.
+ """
+
+ database: "Database" = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message="Database",
+ )
+ update_mask: field_mask_pb2.FieldMask = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=field_mask_pb2.FieldMask,
+ )
+
+
+class UpdateDatabaseMetadata(proto.Message):
+ r"""Metadata type for the operation returned by
+ [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
+
+ Attributes:
+ request (google.cloud.spanner_admin_database_v1.types.UpdateDatabaseRequest):
+ The request for
+ [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
+ progress (google.cloud.spanner_admin_database_v1.types.OperationProgress):
+ The progress of the
+ [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]
+ operation.
+ cancel_time (google.protobuf.timestamp_pb2.Timestamp):
+ The time at which this operation was
+ cancelled. If set, this operation is in the
+ process of undoing itself (which is
+ best-effort).
+ """
+
+ request: "UpdateDatabaseRequest" = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message="UpdateDatabaseRequest",
+ )
+ progress: common.OperationProgress = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=common.OperationProgress,
+ )
+ cancel_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=timestamp_pb2.Timestamp,
+ )
class UpdateDatabaseDdlRequest(proto.Message):
@@ -286,7 +529,7 @@ class UpdateDatabaseDdlRequest(proto.Message):
Attributes:
database (str):
Required. The database to update.
- statements (Sequence[str]):
+ statements (MutableSequence[str]):
Required. DDL statements to be applied to the
database.
operation_id (str):
@@ -311,11 +554,91 @@ class UpdateDatabaseDdlRequest(proto.Message):
underscore. If the named operation already exists,
[UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
returns ``ALREADY_EXISTS``.
+ proto_descriptors (bytes):
+ Optional. Proto descriptors used by CREATE/ALTER PROTO
+ BUNDLE statements. Contains a protobuf-serialized
+ `google.protobuf.FileDescriptorSet `__.
+ To generate it,
+ `install `__ and
+ run ``protoc`` with --include_imports and
+ --descriptor_set_out. For example, to generate for
+ moon/shot/app.proto, run
+
+ ::
+
+ $protoc --proto_path=/app_path --proto_path=/lib_path \
+ --include_imports \
+ --descriptor_set_out=descriptors.data \
+ moon/shot/app.proto
+
+ For more details, see protobuffer `self
+ description `__.
+ throughput_mode (bool):
+ Optional. This field is exposed to be used by the Spanner
+ Migration Tool. For more details, see
+ `SMT `__.
"""
- database = proto.Field(proto.STRING, number=1,)
- statements = proto.RepeatedField(proto.STRING, number=2,)
- operation_id = proto.Field(proto.STRING, number=3,)
+ database: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ statements: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=2,
+ )
+ operation_id: str = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+ proto_descriptors: bytes = proto.Field(
+ proto.BYTES,
+ number=4,
+ )
+ throughput_mode: bool = proto.Field(
+ proto.BOOL,
+ number=5,
+ )
+
+
+class DdlStatementActionInfo(proto.Message):
+ r"""Action information extracted from a DDL statement. This proto is
+ used to display the brief info of the DDL statement for the
+ operation
+ [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
+
+ Attributes:
+ action (str):
+ The action for the DDL statement, e.g.
+ CREATE, ALTER, DROP, GRANT, etc. This field is a
+ non-empty string.
+ entity_type (str):
+ The entity type for the DDL statement, e.g. TABLE, INDEX,
+ VIEW, etc. This field can be empty string for some DDL
+ statement, e.g. for statement "ANALYZE", ``entity_type`` =
+ "".
+ entity_names (MutableSequence[str]):
+ The entity name(s) being operated on the DDL statement. E.g.
+
+ 1. For statement "CREATE TABLE t1(...)", ``entity_names`` =
+ ["t1"].
+ 2. For statement "GRANT ROLE r1, r2 ...", ``entity_names`` =
+ ["r1", "r2"].
+ 3. For statement "ANALYZE", ``entity_names`` = [].
+ """
+
+ action: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ entity_type: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ entity_names: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=3,
+ )
class UpdateDatabaseDdlMetadata(proto.Message):
@@ -325,40 +648,60 @@ class UpdateDatabaseDdlMetadata(proto.Message):
Attributes:
database (str):
The database being modified.
- statements (Sequence[str]):
+ statements (MutableSequence[str]):
For an update this list contains all the
statements. For an individual statement, this
list contains only that statement.
- commit_timestamps (Sequence[google.protobuf.timestamp_pb2.Timestamp]):
+ commit_timestamps (MutableSequence[google.protobuf.timestamp_pb2.Timestamp]):
Reports the commit timestamps of all statements that have
succeeded so far, where ``commit_timestamps[i]`` is the
commit timestamp for the statement ``statements[i]``.
throttled (bool):
Output only. When true, indicates that the
- operation is throttled e.g due to resource
+ operation is throttled e.g. due to resource
constraints. When resources become available the
operation will resume and this field will be
false again.
- progress (Sequence[google.cloud.spanner_admin_database_v1.types.OperationProgress]):
+ progress (MutableSequence[google.cloud.spanner_admin_database_v1.types.OperationProgress]):
The progress of the
[UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
- operations. Currently, only index creation statements will
- have a continuously updating progress. For non-index
- creation statements, ``progress[i]`` will have start time
- and end time populated with commit timestamp of operation,
- as well as a progress of 100% once the operation has
- completed. ``progress[i]`` is the operation progress for
- ``statements[i]``.
+ operations. All DDL statements will have continuously
+ updating progress, and ``progress[i]`` is the operation
+ progress for ``statements[i]``. Also, ``progress[i]`` will
+ have start time and end time populated with commit timestamp
+ of operation, as well as a progress of 100% once the
+ operation has completed.
+ actions (MutableSequence[google.cloud.spanner_admin_database_v1.types.DdlStatementActionInfo]):
+ The brief action info for the DDL statements. ``actions[i]``
+ is the brief info for ``statements[i]``.
"""
- database = proto.Field(proto.STRING, number=1,)
- statements = proto.RepeatedField(proto.STRING, number=2,)
- commit_timestamps = proto.RepeatedField(
- proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,
+ database: str = proto.Field(
+ proto.STRING,
+ number=1,
)
- throttled = proto.Field(proto.BOOL, number=4,)
- progress = proto.RepeatedField(
- proto.MESSAGE, number=5, message=common.OperationProgress,
+ statements: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=2,
+ )
+ commit_timestamps: MutableSequence[timestamp_pb2.Timestamp] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=3,
+ message=timestamp_pb2.Timestamp,
+ )
+ throttled: bool = proto.Field(
+ proto.BOOL,
+ number=4,
+ )
+ progress: MutableSequence[common.OperationProgress] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=5,
+ message=common.OperationProgress,
+ )
+ actions: MutableSequence["DdlStatementActionInfo"] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=6,
+ message="DdlStatementActionInfo",
)
@@ -371,7 +714,10 @@ class DropDatabaseRequest(proto.Message):
Required. The database to be dropped.
"""
- database = proto.Field(proto.STRING, number=1,)
+ database: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
class GetDatabaseDdlRequest(proto.Message):
@@ -385,7 +731,10 @@ class GetDatabaseDdlRequest(proto.Message):
``projects//instances//databases/``
"""
- database = proto.Field(proto.STRING, number=1,)
+ database: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
class GetDatabaseDdlResponse(proto.Message):
@@ -393,13 +742,26 @@ class GetDatabaseDdlResponse(proto.Message):
[GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
Attributes:
- statements (Sequence[str]):
+ statements (MutableSequence[str]):
A list of formatted DDL statements defining
the schema of the database specified in the
request.
+ proto_descriptors (bytes):
+ Proto descriptors stored in the database. Contains a
+ protobuf-serialized
+ `google.protobuf.FileDescriptorSet `__.
+ For more details, see protobuffer `self
+ description `__.
"""
- statements = proto.RepeatedField(proto.STRING, number=1,)
+ statements: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=1,
+ )
+ proto_descriptors: bytes = proto.Field(
+ proto.BYTES,
+ number=2,
+ )
class ListDatabaseOperationsRequest(proto.Message):
@@ -424,19 +786,21 @@ class ListDatabaseOperationsRequest(proto.Message):
[Operation][google.longrunning.Operation] are eligible for
filtering:
- - ``name`` - The name of the long-running operation
- - ``done`` - False if the operation is in progress, else
- true.
- - ``metadata.@type`` - the type of metadata. For example,
- the type string for
- [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]
- is
- ``type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata``.
- - ``metadata.`` - any field in metadata.value.
- - ``error`` - Error associated with the long-running
- operation.
- - ``response.@type`` - the type of response.
- - ``response.`` - any field in response.value.
+ - ``name`` - The name of the long-running operation
+ - ``done`` - False if the operation is in progress, else
+ true.
+ - ``metadata.@type`` - the type of metadata. For example,
+ the type string for
+ [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]
+ is
+ ``type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata``.
+ - ``metadata.`` - any field in metadata.value.
+ ``metadata.@type`` must be specified first, if filtering
+ on metadata fields.
+ - ``error`` - Error associated with the long-running
+ operation.
+ - ``response.@type`` - the type of response.
+ - ``response.`` - any field in response.value.
You can combine multiple expressions by enclosing each
expression in parentheses. By default, expressions are
@@ -445,21 +809,21 @@ class ListDatabaseOperationsRequest(proto.Message):
Here are a few examples:
- - ``done:true`` - The operation is complete.
- - ``(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND``
- ``(metadata.source_type:BACKUP) AND``
- ``(metadata.backup_info.backup:backup_howl) AND``
- ``(metadata.name:restored_howl) AND``
- ``(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND``
- ``(error:*)`` - Return operations where:
-
- - The operation's metadata type is
- [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
- - The database is restored from a backup.
- - The backup name contains "backup_howl".
- - The restored database's name contains "restored_howl".
- - The operation started before 2018-03-28T14:50:00Z.
- - The operation resulted in an error.
+ - ``done:true`` - The operation is complete.
+ - ``(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND``
+ ``(metadata.source_type:BACKUP) AND``
+ ``(metadata.backup_info.backup:backup_howl) AND``
+ ``(metadata.name:restored_howl) AND``
+ ``(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND``
+ ``(error:*)`` - Return operations where:
+
+ - The operation's metadata type is
+ [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
+ - The database is restored from a backup.
+ - The backup name contains "backup_howl".
+ - The restored database's name contains "restored_howl".
+ - The operation started before 2018-03-28T14:50:00Z.
+ - The operation resulted in an error.
page_size (int):
Number of operations to be returned in the
response. If 0 or less, defaults to the server's
@@ -472,10 +836,22 @@ class ListDatabaseOperationsRequest(proto.Message):
to the same ``parent`` and with the same ``filter``.
"""
- parent = proto.Field(proto.STRING, number=1,)
- filter = proto.Field(proto.STRING, number=2,)
- page_size = proto.Field(proto.INT32, number=3,)
- page_token = proto.Field(proto.STRING, number=4,)
+ parent: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ filter: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ page_size: int = proto.Field(
+ proto.INT32,
+ number=3,
+ )
+ page_token: str = proto.Field(
+ proto.STRING,
+ number=4,
+ )
class ListDatabaseOperationsResponse(proto.Message):
@@ -483,7 +859,7 @@ class ListDatabaseOperationsResponse(proto.Message):
[ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
Attributes:
- operations (Sequence[google.longrunning.operations_pb2.Operation]):
+ operations (MutableSequence[google.longrunning.operations_pb2.Operation]):
The list of matching database [long-running
operations][google.longrunning.Operation]. Each operation's
name will be prefixed by the database's name. The
@@ -500,16 +876,24 @@ class ListDatabaseOperationsResponse(proto.Message):
def raw_page(self):
return self
- operations = proto.RepeatedField(
- proto.MESSAGE, number=1, message=operations_pb2.Operation,
+ operations: MutableSequence[operations_pb2.Operation] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=1,
+ message=operations_pb2.Operation,
+ )
+ next_page_token: str = proto.Field(
+ proto.STRING,
+ number=2,
)
- next_page_token = proto.Field(proto.STRING, number=2,)
class RestoreDatabaseRequest(proto.Message):
r"""The request for
[RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
Attributes:
parent (str):
Required. The name of the instance in which to create the
@@ -527,6 +911,8 @@ class RestoreDatabaseRequest(proto.Message):
Name of the backup from which to restore. Values are of the
form
``projects//instances//backups/``.
+
+ This field is a member of `oneof`_ ``source``.
encryption_config (google.cloud.spanner_admin_database_v1.types.RestoreDatabaseEncryptionConfig):
Optional. An encryption configuration describing the
encryption type and key resources in Cloud KMS used to
@@ -537,16 +923,29 @@ class RestoreDatabaseRequest(proto.Message):
= ``USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION``.
"""
- parent = proto.Field(proto.STRING, number=1,)
- database_id = proto.Field(proto.STRING, number=2,)
- backup = proto.Field(proto.STRING, number=3, oneof="source",)
- encryption_config = proto.Field(
- proto.MESSAGE, number=4, message="RestoreDatabaseEncryptionConfig",
+ parent: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ database_id: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ backup: str = proto.Field(
+ proto.STRING,
+ number=3,
+ oneof="source",
+ )
+ encryption_config: "RestoreDatabaseEncryptionConfig" = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message="RestoreDatabaseEncryptionConfig",
)
class RestoreDatabaseEncryptionConfig(proto.Message):
r"""Encryption configuration for the restored database.
+
Attributes:
encryption_type (google.cloud.spanner_admin_database_v1.types.RestoreDatabaseEncryptionConfig.EncryptionType):
Required. The encryption type of the restored
@@ -558,23 +957,72 @@ class RestoreDatabaseEncryptionConfig(proto.Message):
[encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
is ``CUSTOMER_MANAGED_ENCRYPTION``. Values are of the form
``projects//locations//keyRings//cryptoKeys/``.
+ kms_key_names (MutableSequence[str]):
+ Optional. Specifies the KMS configuration for the one or
+ more keys used to encrypt the database. Values are of the
+ form
+ ``projects//locations//keyRings//cryptoKeys/``.
+
+ The keys referenced by kms_key_names must fully cover all
+ regions of the database instance configuration. Some
+ examples:
+
+ - For single region database instance configs, specify a
+ single regional location KMS key.
+ - For multi-regional database instance configs of type
+ GOOGLE_MANAGED, either specify a multi-regional location
+ KMS key or multiple regional location KMS keys that cover
+ all regions in the instance config.
+ - For a database instance config of type USER_MANAGED,
+ please specify only regional location KMS keys to cover
+ each region in the instance config. Multi-regional
+ location KMS keys are not supported for USER_MANAGED
+ instance configs.
"""
class EncryptionType(proto.Enum):
- r"""Encryption types for the database to be restored."""
+ r"""Encryption types for the database to be restored.
+
+ Values:
+ ENCRYPTION_TYPE_UNSPECIFIED (0):
+ Unspecified. Do not use.
+ USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION (1):
+ This is the default option when
+ [encryption_config][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig]
+ is not specified.
+ GOOGLE_DEFAULT_ENCRYPTION (2):
+ Use Google default encryption.
+ CUSTOMER_MANAGED_ENCRYPTION (3):
+ Use customer managed encryption. If specified,
+ ``kms_key_name`` must contain a valid Cloud KMS key.
+ """
ENCRYPTION_TYPE_UNSPECIFIED = 0
USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION = 1
GOOGLE_DEFAULT_ENCRYPTION = 2
CUSTOMER_MANAGED_ENCRYPTION = 3
- encryption_type = proto.Field(proto.ENUM, number=1, enum=EncryptionType,)
- kms_key_name = proto.Field(proto.STRING, number=2,)
+ encryption_type: EncryptionType = proto.Field(
+ proto.ENUM,
+ number=1,
+ enum=EncryptionType,
+ )
+ kms_key_name: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ kms_key_names: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=3,
+ )
class RestoreDatabaseMetadata(proto.Message):
r"""Metadata type for the long-running operation returned by
[RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
Attributes:
name (str):
Name of the database being created and
@@ -584,6 +1032,8 @@ class RestoreDatabaseMetadata(proto.Message):
backup_info (google.cloud.spanner_admin_database_v1.types.BackupInfo):
Information about the backup used to restore
the database.
+
+ This field is a member of `oneof`_ ``source_info``.
progress (google.cloud.spanner_admin_database_v1.types.OperationProgress):
The progress of the
[RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]
@@ -619,14 +1069,35 @@ class RestoreDatabaseMetadata(proto.Message):
if the restore was not successful.
"""
- name = proto.Field(proto.STRING, number=1,)
- source_type = proto.Field(proto.ENUM, number=2, enum="RestoreSourceType",)
- backup_info = proto.Field(
- proto.MESSAGE, number=3, oneof="source_info", message=gsad_backup.BackupInfo,
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ source_type: "RestoreSourceType" = proto.Field(
+ proto.ENUM,
+ number=2,
+ enum="RestoreSourceType",
+ )
+ backup_info: gsad_backup.BackupInfo = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ oneof="source_info",
+ message=gsad_backup.BackupInfo,
+ )
+ progress: common.OperationProgress = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=common.OperationProgress,
+ )
+ cancel_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=5,
+ message=timestamp_pb2.Timestamp,
+ )
+ optimize_database_operation_name: str = proto.Field(
+ proto.STRING,
+ number=6,
)
- progress = proto.Field(proto.MESSAGE, number=4, message=common.OperationProgress,)
- cancel_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,)
- optimize_database_operation_name = proto.Field(proto.STRING, number=6,)
class OptimizeRestoredDatabaseMetadata(proto.Message):
@@ -645,8 +1116,234 @@ class OptimizeRestoredDatabaseMetadata(proto.Message):
optimizations.
"""
- name = proto.Field(proto.STRING, number=1,)
- progress = proto.Field(proto.MESSAGE, number=2, message=common.OperationProgress,)
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ progress: common.OperationProgress = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=common.OperationProgress,
+ )
+
+
+class DatabaseRole(proto.Message):
+ r"""A Cloud Spanner database role.
+
+ Attributes:
+ name (str):
+ Required. The name of the database role. Values are of the
+ form
+ ``projects//instances//databases//databaseRoles/``
+ where ```` is as specified in the ``CREATE ROLE`` DDL
+ statement.
+ """
+
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+
+
+class ListDatabaseRolesRequest(proto.Message):
+ r"""The request for
+ [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+
+ Attributes:
+ parent (str):
+ Required. The database whose roles should be listed. Values
+ are of the form
+ ``projects//instances//databases/``.
+ page_size (int):
+ Number of database roles to be returned in
+ the response. If 0 or less, defaults to the
+ server's maximum allowed page size.
+ page_token (str):
+ If non-empty, ``page_token`` should contain a
+ [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
+ from a previous
+ [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
+ """
+
+ parent: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ page_size: int = proto.Field(
+ proto.INT32,
+ number=2,
+ )
+ page_token: str = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+
+
+class ListDatabaseRolesResponse(proto.Message):
+ r"""The response for
+ [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+
+ Attributes:
+ database_roles (MutableSequence[google.cloud.spanner_admin_database_v1.types.DatabaseRole]):
+ Database roles that matched the request.
+ next_page_token (str):
+ ``next_page_token`` can be sent in a subsequent
+ [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]
+ call to fetch more of the matching roles.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ database_roles: MutableSequence["DatabaseRole"] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=1,
+ message="DatabaseRole",
+ )
+ next_page_token: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+
+
+class AddSplitPointsRequest(proto.Message):
+ r"""The request for
+ [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+
+ Attributes:
+ database (str):
+ Required. The database on whose tables/indexes split points
+ are to be added. Values are of the form
+ ``projects//instances//databases/``.
+ split_points (MutableSequence[google.cloud.spanner_admin_database_v1.types.SplitPoints]):
+ Required. The split points to add.
+ initiator (str):
+ Optional. A user-supplied tag associated with the split
+ points. For example, "initial_data_load", "special_event_1".
+ Defaults to "CloudAddSplitPointsAPI" if not specified. The
+ length of the tag must not exceed 50 characters, else it will be
+ trimmed. Only valid UTF8 characters are allowed.
+ """
+
+ database: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ split_points: MutableSequence["SplitPoints"] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=2,
+ message="SplitPoints",
+ )
+ initiator: str = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+
+
+class AddSplitPointsResponse(proto.Message):
+ r"""The response for
+ [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+
+ """
+
+
+class SplitPoints(proto.Message):
+ r"""The split points of a table/index.
+
+ Attributes:
+ table (str):
+ The table to split.
+ index (str):
+ The index to split. If specified, the ``table`` field must
+ refer to the index's base table.
+ keys (MutableSequence[google.cloud.spanner_admin_database_v1.types.SplitPoints.Key]):
+ Required. The list of split keys, i.e., the
+ split boundaries.
+ expire_time (google.protobuf.timestamp_pb2.Timestamp):
+ Optional. The expiration timestamp of the
+ split points. A timestamp in the past means
+ immediate expiration. The maximum value can be
+ 30 days in the future. Defaults to 10 days in
+ the future if not specified.
+ """
+
+ class Key(proto.Message):
+ r"""A split key.
+
+ Attributes:
+ key_parts (google.protobuf.struct_pb2.ListValue):
+ Required. The column values making up the
+ split key.
+ """
+
+ key_parts: struct_pb2.ListValue = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message=struct_pb2.ListValue,
+ )
+
+ table: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ index: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ keys: MutableSequence[Key] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=3,
+ message=Key,
+ )
+ expire_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=5,
+ message=timestamp_pb2.Timestamp,
+ )
+
+
+class InternalUpdateGraphOperationRequest(proto.Message):
+ r"""Internal request proto, do not use directly.
+
+ Attributes:
+ database (str):
+ Internal field, do not use directly.
+ operation_id (str):
+ Internal field, do not use directly.
+ vm_identity_token (str):
+ Internal field, do not use directly.
+ progress (float):
+ Internal field, do not use directly.
+ status (google.rpc.status_pb2.Status):
+ Internal field, do not use directly.
+ """
+
+ database: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ operation_id: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ vm_identity_token: str = proto.Field(
+ proto.STRING,
+ number=5,
+ )
+ progress: float = proto.Field(
+ proto.DOUBLE,
+ number=3,
+ )
+ status: status_pb2.Status = proto.Field(
+ proto.MESSAGE,
+ number=6,
+ message=status_pb2.Status,
+ )
+
+
+class InternalUpdateGraphOperationResponse(proto.Message):
+ r"""Internal response proto, do not use directly."""
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/spanner_admin_instance_v1/__init__.py b/google/cloud/spanner_admin_instance_v1/__init__.py
index cdc373bcff..5368b59895 100644
--- a/google/cloud/spanner_admin_instance_v1/__init__.py
+++ b/google/cloud/spanner_admin_instance_v1/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,40 +13,98 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from google.cloud.spanner_admin_instance_v1 import gapic_version as package_version
+
+__version__ = package_version.__version__
+
from .services.instance_admin import InstanceAdminClient
from .services.instance_admin import InstanceAdminAsyncClient
+from .types.common import OperationProgress
+from .types.common import ReplicaSelection
+from .types.common import FulfillmentPeriod
+from .types.spanner_instance_admin import AutoscalingConfig
+from .types.spanner_instance_admin import CreateInstanceConfigMetadata
+from .types.spanner_instance_admin import CreateInstanceConfigRequest
from .types.spanner_instance_admin import CreateInstanceMetadata
+from .types.spanner_instance_admin import CreateInstancePartitionMetadata
+from .types.spanner_instance_admin import CreateInstancePartitionRequest
from .types.spanner_instance_admin import CreateInstanceRequest
+from .types.spanner_instance_admin import DeleteInstanceConfigRequest
+from .types.spanner_instance_admin import DeleteInstancePartitionRequest
from .types.spanner_instance_admin import DeleteInstanceRequest
+from .types.spanner_instance_admin import FreeInstanceMetadata
from .types.spanner_instance_admin import GetInstanceConfigRequest
+from .types.spanner_instance_admin import GetInstancePartitionRequest
from .types.spanner_instance_admin import GetInstanceRequest
from .types.spanner_instance_admin import Instance
from .types.spanner_instance_admin import InstanceConfig
+from .types.spanner_instance_admin import InstancePartition
+from .types.spanner_instance_admin import ListInstanceConfigOperationsRequest
+from .types.spanner_instance_admin import ListInstanceConfigOperationsResponse
from .types.spanner_instance_admin import ListInstanceConfigsRequest
from .types.spanner_instance_admin import ListInstanceConfigsResponse
+from .types.spanner_instance_admin import ListInstancePartitionOperationsRequest
+from .types.spanner_instance_admin import ListInstancePartitionOperationsResponse
+from .types.spanner_instance_admin import ListInstancePartitionsRequest
+from .types.spanner_instance_admin import ListInstancePartitionsResponse
from .types.spanner_instance_admin import ListInstancesRequest
from .types.spanner_instance_admin import ListInstancesResponse
+from .types.spanner_instance_admin import MoveInstanceMetadata
+from .types.spanner_instance_admin import MoveInstanceRequest
+from .types.spanner_instance_admin import MoveInstanceResponse
+from .types.spanner_instance_admin import ReplicaComputeCapacity
from .types.spanner_instance_admin import ReplicaInfo
+from .types.spanner_instance_admin import UpdateInstanceConfigMetadata
+from .types.spanner_instance_admin import UpdateInstanceConfigRequest
from .types.spanner_instance_admin import UpdateInstanceMetadata
+from .types.spanner_instance_admin import UpdateInstancePartitionMetadata
+from .types.spanner_instance_admin import UpdateInstancePartitionRequest
from .types.spanner_instance_admin import UpdateInstanceRequest
__all__ = (
"InstanceAdminAsyncClient",
+ "AutoscalingConfig",
+ "CreateInstanceConfigMetadata",
+ "CreateInstanceConfigRequest",
"CreateInstanceMetadata",
+ "CreateInstancePartitionMetadata",
+ "CreateInstancePartitionRequest",
"CreateInstanceRequest",
+ "DeleteInstanceConfigRequest",
+ "DeleteInstancePartitionRequest",
"DeleteInstanceRequest",
+ "FreeInstanceMetadata",
+ "FulfillmentPeriod",
"GetInstanceConfigRequest",
+ "GetInstancePartitionRequest",
"GetInstanceRequest",
"Instance",
"InstanceAdminClient",
"InstanceConfig",
+ "InstancePartition",
+ "ListInstanceConfigOperationsRequest",
+ "ListInstanceConfigOperationsResponse",
"ListInstanceConfigsRequest",
"ListInstanceConfigsResponse",
+ "ListInstancePartitionOperationsRequest",
+ "ListInstancePartitionOperationsResponse",
+ "ListInstancePartitionsRequest",
+ "ListInstancePartitionsResponse",
"ListInstancesRequest",
"ListInstancesResponse",
+ "MoveInstanceMetadata",
+ "MoveInstanceRequest",
+ "MoveInstanceResponse",
+ "OperationProgress",
+ "ReplicaComputeCapacity",
"ReplicaInfo",
+ "ReplicaSelection",
+ "UpdateInstanceConfigMetadata",
+ "UpdateInstanceConfigRequest",
"UpdateInstanceMetadata",
+ "UpdateInstancePartitionMetadata",
+ "UpdateInstancePartitionRequest",
"UpdateInstanceRequest",
)
diff --git a/google/cloud/spanner_admin_instance_v1/gapic_metadata.json b/google/cloud/spanner_admin_instance_v1/gapic_metadata.json
index 6fee5bcd53..60fa46718a 100644
--- a/google/cloud/spanner_admin_instance_v1/gapic_metadata.json
+++ b/google/cloud/spanner_admin_instance_v1/gapic_metadata.json
@@ -15,11 +15,31 @@
"create_instance"
]
},
+ "CreateInstanceConfig": {
+ "methods": [
+ "create_instance_config"
+ ]
+ },
+ "CreateInstancePartition": {
+ "methods": [
+ "create_instance_partition"
+ ]
+ },
"DeleteInstance": {
"methods": [
"delete_instance"
]
},
+ "DeleteInstanceConfig": {
+ "methods": [
+ "delete_instance_config"
+ ]
+ },
+ "DeleteInstancePartition": {
+ "methods": [
+ "delete_instance_partition"
+ ]
+ },
"GetIamPolicy": {
"methods": [
"get_iam_policy"
@@ -35,16 +55,41 @@
"get_instance_config"
]
},
+ "GetInstancePartition": {
+ "methods": [
+ "get_instance_partition"
+ ]
+ },
+ "ListInstanceConfigOperations": {
+ "methods": [
+ "list_instance_config_operations"
+ ]
+ },
"ListInstanceConfigs": {
"methods": [
"list_instance_configs"
]
},
+ "ListInstancePartitionOperations": {
+ "methods": [
+ "list_instance_partition_operations"
+ ]
+ },
+ "ListInstancePartitions": {
+ "methods": [
+ "list_instance_partitions"
+ ]
+ },
"ListInstances": {
"methods": [
"list_instances"
]
},
+ "MoveInstance": {
+ "methods": [
+ "move_instance"
+ ]
+ },
"SetIamPolicy": {
"methods": [
"set_iam_policy"
@@ -59,6 +104,16 @@
"methods": [
"update_instance"
]
+ },
+ "UpdateInstanceConfig": {
+ "methods": [
+ "update_instance_config"
+ ]
+ },
+ "UpdateInstancePartition": {
+ "methods": [
+ "update_instance_partition"
+ ]
}
}
},
@@ -70,11 +125,31 @@
"create_instance"
]
},
+ "CreateInstanceConfig": {
+ "methods": [
+ "create_instance_config"
+ ]
+ },
+ "CreateInstancePartition": {
+ "methods": [
+ "create_instance_partition"
+ ]
+ },
"DeleteInstance": {
"methods": [
"delete_instance"
]
},
+ "DeleteInstanceConfig": {
+ "methods": [
+ "delete_instance_config"
+ ]
+ },
+ "DeleteInstancePartition": {
+ "methods": [
+ "delete_instance_partition"
+ ]
+ },
"GetIamPolicy": {
"methods": [
"get_iam_policy"
@@ -90,16 +165,41 @@
"get_instance_config"
]
},
+ "GetInstancePartition": {
+ "methods": [
+ "get_instance_partition"
+ ]
+ },
+ "ListInstanceConfigOperations": {
+ "methods": [
+ "list_instance_config_operations"
+ ]
+ },
"ListInstanceConfigs": {
"methods": [
"list_instance_configs"
]
},
+ "ListInstancePartitionOperations": {
+ "methods": [
+ "list_instance_partition_operations"
+ ]
+ },
+ "ListInstancePartitions": {
+ "methods": [
+ "list_instance_partitions"
+ ]
+ },
"ListInstances": {
"methods": [
"list_instances"
]
},
+ "MoveInstance": {
+ "methods": [
+ "move_instance"
+ ]
+ },
"SetIamPolicy": {
"methods": [
"set_iam_policy"
@@ -114,6 +214,126 @@
"methods": [
"update_instance"
]
+ },
+ "UpdateInstanceConfig": {
+ "methods": [
+ "update_instance_config"
+ ]
+ },
+ "UpdateInstancePartition": {
+ "methods": [
+ "update_instance_partition"
+ ]
+ }
+ }
+ },
+ "rest": {
+ "libraryClient": "InstanceAdminClient",
+ "rpcs": {
+ "CreateInstance": {
+ "methods": [
+ "create_instance"
+ ]
+ },
+ "CreateInstanceConfig": {
+ "methods": [
+ "create_instance_config"
+ ]
+ },
+ "CreateInstancePartition": {
+ "methods": [
+ "create_instance_partition"
+ ]
+ },
+ "DeleteInstance": {
+ "methods": [
+ "delete_instance"
+ ]
+ },
+ "DeleteInstanceConfig": {
+ "methods": [
+ "delete_instance_config"
+ ]
+ },
+ "DeleteInstancePartition": {
+ "methods": [
+ "delete_instance_partition"
+ ]
+ },
+ "GetIamPolicy": {
+ "methods": [
+ "get_iam_policy"
+ ]
+ },
+ "GetInstance": {
+ "methods": [
+ "get_instance"
+ ]
+ },
+ "GetInstanceConfig": {
+ "methods": [
+ "get_instance_config"
+ ]
+ },
+ "GetInstancePartition": {
+ "methods": [
+ "get_instance_partition"
+ ]
+ },
+ "ListInstanceConfigOperations": {
+ "methods": [
+ "list_instance_config_operations"
+ ]
+ },
+ "ListInstanceConfigs": {
+ "methods": [
+ "list_instance_configs"
+ ]
+ },
+ "ListInstancePartitionOperations": {
+ "methods": [
+ "list_instance_partition_operations"
+ ]
+ },
+ "ListInstancePartitions": {
+ "methods": [
+ "list_instance_partitions"
+ ]
+ },
+ "ListInstances": {
+ "methods": [
+ "list_instances"
+ ]
+ },
+ "MoveInstance": {
+ "methods": [
+ "move_instance"
+ ]
+ },
+ "SetIamPolicy": {
+ "methods": [
+ "set_iam_policy"
+ ]
+ },
+ "TestIamPermissions": {
+ "methods": [
+ "test_iam_permissions"
+ ]
+ },
+ "UpdateInstance": {
+ "methods": [
+ "update_instance"
+ ]
+ },
+ "UpdateInstanceConfig": {
+ "methods": [
+ "update_instance_config"
+ ]
+ },
+ "UpdateInstancePartition": {
+ "methods": [
+ "update_instance_partition"
+ ]
}
}
}
diff --git a/google/cloud/spanner_admin_instance_v1/gapic_version.py b/google/cloud/spanner_admin_instance_v1/gapic_version.py
new file mode 100644
index 0000000000..fa3f4c040d
--- /dev/null
+++ b/google/cloud/spanner_admin_instance_v1/gapic_version.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+__version__ = "3.58.0" # {x-release-please-version}
diff --git a/google/cloud/spanner_admin_instance_v1/services/__init__.py b/google/cloud/spanner_admin_instance_v1/services/__init__.py
index 4de65971c2..cbf94b283c 100644
--- a/google/cloud/spanner_admin_instance_v1/services/__init__.py
+++ b/google/cloud/spanner_admin_instance_v1/services/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/__init__.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/__init__.py
index 2ba47af654..51df22ca2e 100644
--- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/__init__.py
+++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py
index 2b52431771..1e87fc5a63 100644
--- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py
+++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,18 +13,38 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+import logging as std_logging
from collections import OrderedDict
-import functools
import re
-from typing import Dict, Sequence, Tuple, Type, Union
-import pkg_resources
-
-import google.api_core.client_options as ClientOptions # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
+from typing import (
+ Dict,
+ Callable,
+ Mapping,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+)
+import uuid
+
+from google.cloud.spanner_admin_instance_v1 import gapic_version as package_version
+
+from google.api_core.client_options import ClientOptions
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry_async as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
+import google.protobuf
+
+
+try:
+ OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
@@ -32,18 +52,32 @@
from google.cloud.spanner_admin_instance_v1.types import spanner_instance_admin
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import InstanceAdminTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import InstanceAdminGrpcAsyncIOTransport
from .client import InstanceAdminClient
+try:
+ from google.api_core import client_logging # type: ignore
+
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = std_logging.getLogger(__name__)
+
class InstanceAdminAsyncClient:
"""Cloud Spanner Instance Admin API
+
The Cloud Spanner Instance Admin API can be used to create,
delete, modify and list instances. Instances are dedicated Cloud
Spanner serving and storage resources to be used by Cloud
Spanner databases.
+
Each instance has a "configuration", which dictates where the
serving resources for the Cloud Spanner instance are located
(e.g., US-central, Europe). Configurations are created by Google
@@ -64,8 +98,12 @@ class InstanceAdminAsyncClient:
_client: InstanceAdminClient
+ # Copy defaults from the synchronous client for use here.
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
DEFAULT_ENDPOINT = InstanceAdminClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = InstanceAdminClient.DEFAULT_MTLS_ENDPOINT
+ _DEFAULT_ENDPOINT_TEMPLATE = InstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE
+ _DEFAULT_UNIVERSE = InstanceAdminClient._DEFAULT_UNIVERSE
instance_path = staticmethod(InstanceAdminClient.instance_path)
parse_instance_path = staticmethod(InstanceAdminClient.parse_instance_path)
@@ -73,6 +111,10 @@ class InstanceAdminAsyncClient:
parse_instance_config_path = staticmethod(
InstanceAdminClient.parse_instance_config_path
)
+ instance_partition_path = staticmethod(InstanceAdminClient.instance_partition_path)
+ parse_instance_partition_path = staticmethod(
+ InstanceAdminClient.parse_instance_partition_path
+ )
common_billing_account_path = staticmethod(
InstanceAdminClient.common_billing_account_path
)
@@ -131,6 +173,42 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
from_service_account_json = from_service_account_file
+ @classmethod
+ def get_mtls_endpoint_and_cert_source(
+ cls, client_options: Optional[ClientOptions] = None
+ ):
+ """Return the API endpoint and client cert source for mutual TLS.
+
+ The client cert source is determined in the following order:
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+ client cert source is None.
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+ default client cert source exists, use the default one; otherwise the client cert
+ source is None.
+
+ The API endpoint is determined in the following order:
+ (1) if `client_options.api_endpoint` is provided, use the provided one.
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
+ default mTLS endpoint; if the environment variable is "never", use the default API
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+ use the default API endpoint.
+
+ More details can be found at https://google.aip.dev/auth/4114.
+
+ Args:
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
+ in this method.
+
+ Returns:
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+ client cert source to use.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+ """
+ return InstanceAdminClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
+
@property
def transport(self) -> InstanceAdminTransport:
"""Returns the transport used by the client instance.
@@ -140,19 +218,38 @@ def transport(self) -> InstanceAdminTransport:
"""
return self._client.transport
- get_transport_class = functools.partial(
- type(InstanceAdminClient).get_transport_class, type(InstanceAdminClient)
- )
+ @property
+ def api_endpoint(self):
+ """Return the API endpoint used by the client instance.
+
+ Returns:
+ str: The API endpoint used by the client instance.
+ """
+ return self._client._api_endpoint
+
+ @property
+ def universe_domain(self) -> str:
+ """Return the universe domain used by the client instance.
+
+ Returns:
+ str: The universe domain used
+ by the client instance.
+ """
+ return self._client._universe_domain
+
+ get_transport_class = InstanceAdminClient.get_transport_class
def __init__(
self,
*,
- credentials: ga_credentials.Credentials = None,
- transport: Union[str, InstanceAdminTransport] = "grpc_asyncio",
- client_options: ClientOptions = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ transport: Optional[
+ Union[str, InstanceAdminTransport, Callable[..., InstanceAdminTransport]]
+ ] = "grpc_asyncio",
+ client_options: Optional[ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
- """Instantiates the instance admin client.
+ """Instantiates the instance admin async client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
@@ -160,26 +257,43 @@ def __init__(
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- transport (Union[str, ~.InstanceAdminTransport]): The
- transport to use. If set to None, a transport is chosen
- automatically.
- client_options (ClientOptions): Custom options for the client. It
- won't take effect if a ``transport`` instance is provided.
- (1) The ``api_endpoint`` property can be used to override the
- default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
- environment variable can also be used to override the endpoint:
+ transport (Optional[Union[str,InstanceAdminTransport,Callable[..., InstanceAdminTransport]]]):
+ The transport to use, or a Callable that constructs and returns a new transport to use.
+ If a Callable is given, it will be called with the same set of initialization
+ arguments as used in the InstanceAdminTransport constructor.
+ If set to None, a transport is chosen automatically.
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
+ Custom options for the client.
+
+ 1. The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client when ``transport`` is
+ not explicitly provided. Only if this property is not set and
+ ``transport`` was not explicitly provided, the endpoint is
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
+ variable, which can have one of the following values:
"always" (always use the default mTLS endpoint), "never" (always
- use the default regular endpoint) and "auto" (auto switch to the
- default mTLS endpoint if client certificate is present, this is
- the default value). However, the ``api_endpoint`` property takes
- precedence if provided.
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ use the default regular endpoint) and "auto" (auto-switch to the
+ default mTLS endpoint if client certificate is present; this is
+ the default value).
+
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
- to provide client certificate for mutual TLS transport. If
+ to provide a client certificate for mTLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
+ 3. The ``universe_domain`` property can be used to override the
+ default "googleapis.com" universe. Note that ``api_endpoint``
+ property still takes precedence; and ``universe_domain`` is
+ currently not supported for mTLS.
+
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
@@ -191,20 +305,73 @@ def __init__(
client_info=client_info,
)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ std_logging.DEBUG
+ ): # pragma: NO COVER
+ _LOGGER.debug(
+ "Created client `google.spanner.admin.instance_v1.InstanceAdminAsyncClient`.",
+ extra={
+ "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin",
+ "universeDomain": getattr(
+ self._client._transport._credentials, "universe_domain", ""
+ ),
+ "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}",
+ "credentialsInfo": getattr(
+ self.transport._credentials, "get_cred_info", lambda: None
+ )(),
+ }
+ if hasattr(self._client._transport, "_credentials")
+ else {
+ "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin",
+ "credentialsType": None,
+ },
+ )
+
async def list_instance_configs(
self,
- request: spanner_instance_admin.ListInstanceConfigsRequest = None,
+ request: Optional[
+ Union[spanner_instance_admin.ListInstanceConfigsRequest, dict]
+ ] = None,
*,
- parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListInstanceConfigsAsyncPager:
r"""Lists the supported instance configurations for a
given project.
+ Returns both Google-managed configurations and
+ user-managed configurations.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_instance_v1
+
+ async def sample_list_instance_configs():
+ # Create a client
+ client = spanner_admin_instance_v1.InstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_instance_v1.ListInstanceConfigsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_instance_configs(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
Args:
- request (:class:`google.cloud.spanner_admin_instance_v1.types.ListInstanceConfigsRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.ListInstanceConfigsRequest, dict]]):
The request object. The request for
[ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
parent (:class:`str`):
@@ -215,32 +382,40 @@ async def list_instance_configs(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstanceConfigsAsyncPager:
The response for
- [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
+ [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_instance_admin.ListInstanceConfigsRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_instance_admin.ListInstanceConfigsRequest):
+ request = spanner_instance_admin.ListInstanceConfigsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -249,21 +424,9 @@ async def list_instance_configs(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.list_instance_configs,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_instance_configs
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -271,13 +434,26 @@ async def list_instance_configs(
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListInstanceConfigsAsyncPager(
- method=rpc, request=request, response=response, metadata=metadata,
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
# Done; return the response.
@@ -285,18 +461,46 @@ async def list_instance_configs(
async def get_instance_config(
self,
- request: spanner_instance_admin.GetInstanceConfigRequest = None,
+ request: Optional[
+ Union[spanner_instance_admin.GetInstanceConfigRequest, dict]
+ ] = None,
*,
- name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> spanner_instance_admin.InstanceConfig:
r"""Gets information about a particular instance
configuration.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_instance_v1
+
+ async def sample_get_instance_config():
+ # Create a client
+ client = spanner_admin_instance_v1.InstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_instance_v1.GetInstanceConfigRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_instance_config(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
- request (:class:`google.cloud.spanner_admin_instance_v1.types.GetInstanceConfigRequest`):
+ request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.GetInstanceConfigRequest, dict]]):
The request object. The request for
[GetInstanceConfigRequest][google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig].
name (:class:`str`):
@@ -307,11 +511,13 @@ async def get_instance_config(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_instance_v1.types.InstanceConfig:
@@ -322,16 +528,22 @@ async def get_instance_config(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_instance_admin.GetInstanceConfigRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_instance_admin.GetInstanceConfigRequest):
+ request = spanner_instance_admin.GetInstanceConfigRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -340,21 +552,9 @@ async def get_instance_config(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.get_instance_config,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_instance_config
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -362,306 +562,188 @@ async def get_instance_config(
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
- # Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
-
- # Done; return the response.
- return response
-
- async def list_instances(
- self,
- request: spanner_instance_admin.ListInstancesRequest = None,
- *,
- parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> pagers.ListInstancesAsyncPager:
- r"""Lists all instances in the given project.
-
- Args:
- request (:class:`google.cloud.spanner_admin_instance_v1.types.ListInstancesRequest`):
- The request object. The request for
- [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
- parent (:class:`str`):
- Required. The name of the project for which a list of
- instances is requested. Values are of the form
- ``projects/``.
-
- This corresponds to the ``parent`` field
- on the ``request`` instance; if ``request`` is provided, this
- should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
- should be retried.
- timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
-
- Returns:
- google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstancesAsyncPager:
- The response for
- [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
-
- Iterating over this object will yield results and
- resolve additional pages automatically.
-
- """
- # Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
- if request is not None and has_flattened_params:
- raise ValueError(
- "If the `request` argument is set, then none of "
- "the individual field arguments should be set."
- )
-
- request = spanner_instance_admin.ListInstancesRequest(request)
-
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
- if parent is not None:
- request.parent = parent
-
- # Wrap the RPC method; this adds retry and timeout information,
- # and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.list_instances,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
-
- # Certain fields should be provided within the metadata header;
- # add these here.
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
- )
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
-
- # This method is paged; wrap the response in a pager, which provides
- # an `__aiter__` convenience method.
- response = pagers.ListInstancesAsyncPager(
- method=rpc, request=request, response=response, metadata=metadata,
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
# Done; return the response.
return response
- async def get_instance(
+ async def create_instance_config(
self,
- request: spanner_instance_admin.GetInstanceRequest = None,
+ request: Optional[
+ Union[spanner_instance_admin.CreateInstanceConfigRequest, dict]
+ ] = None,
*,
- name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> spanner_instance_admin.Instance:
- r"""Gets information about a particular instance.
-
- Args:
- request (:class:`google.cloud.spanner_admin_instance_v1.types.GetInstanceRequest`):
- The request object. The request for
- [GetInstance][google.spanner.admin.instance.v1.InstanceAdmin.GetInstance].
- name (:class:`str`):
- Required. The name of the requested instance. Values are
- of the form ``projects//instances/``.
-
- This corresponds to the ``name`` field
- on the ``request`` instance; if ``request`` is provided, this
- should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
- should be retried.
- timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ parent: Optional[str] = None,
+ instance_config: Optional[spanner_instance_admin.InstanceConfig] = None,
+ instance_config_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Creates an instance configuration and begins preparing it to be
+ used. The returned long-running operation can be used to track
+ the progress of preparing the new instance configuration. The
+ instance configuration name is assigned by the caller. If the
+ named instance configuration already exists,
+ ``CreateInstanceConfig`` returns ``ALREADY_EXISTS``.
- Returns:
- google.cloud.spanner_admin_instance_v1.types.Instance:
- An isolated set of Cloud Spanner
- resources on which databases can be
- hosted.
+ Immediately after the request returns:
- """
- # Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
- if request is not None and has_flattened_params:
- raise ValueError(
- "If the `request` argument is set, then none of "
- "the individual field arguments should be set."
- )
+ - The instance configuration is readable via the API, with all
+ requested attributes. The instance configuration's
+ [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
+ field is set to true. Its state is ``CREATING``.
- request = spanner_instance_admin.GetInstanceRequest(request)
+ While the operation is pending:
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
- if name is not None:
- request.name = name
+ - Cancelling the operation renders the instance configuration
+ immediately unreadable via the API.
+ - Except for deleting the creating resource, all other attempts
+ to modify the instance configuration are rejected.
- # Wrap the RPC method; this adds retry and timeout information,
- # and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.get_instance,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ Upon completion of the returned operation:
- # Certain fields should be provided within the metadata header;
- # add these here.
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
- )
+ - Instances can be created using the instance configuration.
+ - The instance configuration's
+ [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
+ field becomes false. Its state becomes ``READY``.
- # Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+ The returned long-running operation will have a name of the
+ format ``/operations/`` and
+ can be used to track creation of the instance configuration. The
+ metadata field type is
+ [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
+ The response field type is
+ [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig],
+ if successful.
- # Done; return the response.
- return response
+ Authorization requires ``spanner.instanceConfigs.create``
+ permission on the resource
+ [parent][google.spanner.admin.instance.v1.CreateInstanceConfigRequest.parent].
- async def create_instance(
- self,
- request: spanner_instance_admin.CreateInstanceRequest = None,
- *,
- parent: str = None,
- instance_id: str = None,
- instance: spanner_instance_admin.Instance = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> operation_async.AsyncOperation:
- r"""Creates an instance and begins preparing it to begin serving.
- The returned [long-running
- operation][google.longrunning.Operation] can be used to track
- the progress of preparing the new instance. The instance name is
- assigned by the caller. If the named instance already exists,
- ``CreateInstance`` returns ``ALREADY_EXISTS``.
+ .. code-block:: python
- Immediately upon completion of this request:
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_instance_v1
- - The instance is readable via the API, with all requested
- attributes but no allocated resources. Its state is
- ``CREATING``.
+ async def sample_create_instance_config():
+ # Create a client
+ client = spanner_admin_instance_v1.InstanceAdminAsyncClient()
- Until completion of the returned operation:
+ # Initialize request argument(s)
+ request = spanner_admin_instance_v1.CreateInstanceConfigRequest(
+ parent="parent_value",
+ instance_config_id="instance_config_id_value",
+ )
- - Cancelling the operation renders the instance immediately
- unreadable via the API.
- - The instance can be deleted.
- - All other attempts to modify the instance are rejected.
+ # Make the request
+ operation = client.create_instance_config(request=request)
- Upon completion of the returned operation:
+ print("Waiting for operation to complete...")
- - Billing for all successfully-allocated resources begins (some
- types may have lower than the requested levels).
- - Databases can be created in the instance.
- - The instance's allocated resource levels are readable via the
- API.
- - The instance's state becomes ``READY``.
+ response = (await operation).result()
- The returned [long-running
- operation][google.longrunning.Operation] will have a name of the
- format ``