diff --git a/.github/workflows/mock_server_tests.yaml b/.github/workflows/mock_server_tests.yaml
new file mode 100644
index 0000000000..2da5320071
--- /dev/null
+++ b/.github/workflows/mock_server_tests.yaml
@@ -0,0 +1,21 @@
+on:
+ push:
+ branches:
+ - main
+ pull_request:
+name: Run Spanner tests against an in-mem mock server
+jobs:
+ system-tests:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ - name: Setup Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: 3.12
+ - name: Install nox
+ run: python -m pip install nox
+ - name: Run mock server tests
+ run: nox -s mockserver
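Note: the workflow above assumes a `mockserver` session is defined in the repository's noxfile.py. A minimal, hypothetical sketch of such a session (not the repo's actual definition; the test path and dependencies are assumptions) could look like:

    # noxfile.py (hypothetical sketch)
    import nox

    @nox.session(python="3.12")
    def mockserver(session):
        # Install the library in editable mode plus the test runner, then run
        # only the tests that talk to the in-memory mock Spanner server.
        session.install("-e", ".")
        session.install("pytest")
        session.run("pytest", "tests/mockserver_tests", *session.posargs)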
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 7c20592b72..b4ec2efce5 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "3.50.1"
+ ".": "3.51.0"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 43229596ba..4d2eb31d6a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,33 @@
[1]: https://pypi.org/project/google-cloud-spanner/#history
+## [3.51.0](https://github.com/googleapis/python-spanner/compare/v3.50.1...v3.51.0) (2024-12-05)
+
+
+### Features
+
+* Add connection variable for ignoring transaction warnings ([#1249](https://github.com/googleapis/python-spanner/issues/1249)) ([eeb7836](https://github.com/googleapis/python-spanner/commit/eeb7836b6350aa9626dfb733208e6827d38bb9c9))
+* **spanner:** Implement custom tracer_provider injection for opentelemetry traces ([#1229](https://github.com/googleapis/python-spanner/issues/1229)) ([6869ed6](https://github.com/googleapis/python-spanner/commit/6869ed651e41d7a8af046884bc6c792a4177f766))
+* Support float32 parameters in dbapi ([#1245](https://github.com/googleapis/python-spanner/issues/1245)) ([829b799](https://github.com/googleapis/python-spanner/commit/829b799e0c9c6da274bf95c272cda564cfdba928))
+
+
+### Bug Fixes
+
+* Allow setting connection.read_only to same value ([#1247](https://github.com/googleapis/python-spanner/issues/1247)) ([5e8ca94](https://github.com/googleapis/python-spanner/commit/5e8ca949b583fbcf0b92b42696545973aad8c78f))
+* Allow setting staleness to same value in tx ([#1253](https://github.com/googleapis/python-spanner/issues/1253)) ([a214885](https://github.com/googleapis/python-spanner/commit/a214885ed474f3d69875ef580d5f8cbbabe9199a))
+* Dbapi raised AttributeError with [] as arguments ([#1257](https://github.com/googleapis/python-spanner/issues/1257)) ([758bf48](https://github.com/googleapis/python-spanner/commit/758bf4889a7f3346bc8282a3eed47aee43be650c))
+
+
+### Performance Improvements
+
+* Optimize ResultSet decoding ([#1244](https://github.com/googleapis/python-spanner/issues/1244)) ([ccae6e0](https://github.com/googleapis/python-spanner/commit/ccae6e0287ba6cf3c14f15a907b2106b11ef1fdc))
+* Remove repeated GetSession calls for FixedSizePool ([#1252](https://github.com/googleapis/python-spanner/issues/1252)) ([c064815](https://github.com/googleapis/python-spanner/commit/c064815abaaa4b564edd6f0e365a37e7e839080c))
+
+
+### Documentation
+
+* **samples:** Add samples for Cloud Spanner Default Backup Schedules ([#1238](https://github.com/googleapis/python-spanner/issues/1238)) ([054a186](https://github.com/googleapis/python-spanner/commit/054a18658eedc5d4dbecb7508baa3f3d67f5b815))
+
## [3.50.1](https://github.com/googleapis/python-spanner/compare/v3.50.0...v3.50.1) (2024-11-14)
diff --git a/docs/opentelemetry-tracing.rst b/docs/opentelemetry-tracing.rst
index cb9a2b1350..c715ad58ad 100644
--- a/docs/opentelemetry-tracing.rst
+++ b/docs/opentelemetry-tracing.rst
@@ -25,12 +25,21 @@ We also need to tell OpenTelemetry which exporter to use. To export Spanner trac
# Create and export one trace every 1000 requests
sampler = TraceIdRatioBased(1/1000)
- # Use the default tracer provider
- trace.set_tracer_provider(TracerProvider(sampler=sampler))
- trace.get_tracer_provider().add_span_processor(
+ tracer_provider = TracerProvider(sampler=sampler)
+ tracer_provider.add_span_processor(
# Initialize the cloud tracing exporter
BatchSpanProcessor(CloudTraceSpanExporter())
)
+ observability_options = dict(
+ tracer_provider=tracer_provider,
+
+        # extended_tracing defaults to True for legacy reasons, to avoid a
+        # breaking change. It can also be disabled globally with the
+        # environment variable SPANNER_ENABLE_EXTENDED_TRACING=false.
+ enable_extended_tracing=False,
+ )
+    spanner_client = spanner.Client(project_id, observability_options=observability_options)
To get more fine-grained traces from gRPC, you can enable the gRPC instrumentation by the following
@@ -52,3 +61,13 @@ Generated spanner traces should now be available on `Cloud Trace `_
+
+Annotating spans with SQL
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+By default your spans will be annotated with the SQL statements where appropriate, but SQL statements can leak PII (Personally Identifiable Information).
+For legacy reasons this annotation cannot simply be turned off by default; however, you can control it by setting
+
+ SPANNER_ENABLE_EXTENDED_TRACING=false
+
+to turn it off globally, or set `observability_options.enable_extended_tracing=False` when creating each Spanner client.
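For quick reference, a minimal sketch of passing these options to a client (assuming `tracer_provider` and `project_id` are defined as in the snippet above) might look like:

    from google.cloud import spanner

    observability_options = dict(
        tracer_provider=tracer_provider,
        # Drop the db.statement (SQL) annotation from generated spans.
        enable_extended_tracing=False,
    )
    spanner_client = spanner.Client(project_id, observability_options=observability_options)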
diff --git a/examples/trace.py b/examples/trace.py
index 791b6cd20b..e7659e13e2 100644
--- a/examples/trace.py
+++ b/examples/trace.py
@@ -32,15 +32,18 @@ def main():
tracer_provider = TracerProvider(sampler=ALWAYS_ON)
trace_exporter = CloudTraceSpanExporter(project_id=project_id)
tracer_provider.add_span_processor(BatchSpanProcessor(trace_exporter))
- trace.set_tracer_provider(tracer_provider)
- # Retrieve a tracer from the global tracer provider.
- tracer = tracer_provider.get_tracer('MyApp')
# Setup the Cloud Spanner Client.
- spanner_client = spanner.Client(project_id)
+ spanner_client = spanner.Client(
+ project_id,
+ observability_options=dict(tracer_provider=tracer_provider, enable_extended_tracing=True),
+ )
instance = spanner_client.instance('test-instance')
database = instance.database('test-db')
+ # Retrieve a tracer from our custom tracer provider.
+ tracer = tracer_provider.get_tracer('MyApp')
+
# Now run our queries
with tracer.start_as_current_span('QueryInformationSchema'):
with database.snapshot() as snapshot:
diff --git a/google/cloud/spanner_admin_database_v1/gapic_version.py b/google/cloud/spanner_admin_database_v1/gapic_version.py
index 873057e050..99e11c0cb5 100644
--- a/google/cloud/spanner_admin_database_v1/gapic_version.py
+++ b/google/cloud/spanner_admin_database_v1/gapic_version.py
@@ -13,4 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-__version__ = "3.50.1" # {x-release-please-version}
+__version__ = "3.51.0" # {x-release-please-version}
diff --git a/google/cloud/spanner_admin_instance_v1/gapic_version.py b/google/cloud/spanner_admin_instance_v1/gapic_version.py
index 873057e050..99e11c0cb5 100644
--- a/google/cloud/spanner_admin_instance_v1/gapic_version.py
+++ b/google/cloud/spanner_admin_instance_v1/gapic_version.py
@@ -13,4 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-__version__ = "3.50.1" # {x-release-please-version}
+__version__ = "3.51.0" # {x-release-please-version}
diff --git a/google/cloud/spanner_dbapi/connection.py b/google/cloud/spanner_dbapi/connection.py
index b02d62ea27..cec6c64dac 100644
--- a/google/cloud/spanner_dbapi/connection.py
+++ b/google/cloud/spanner_dbapi/connection.py
@@ -89,9 +89,11 @@ class Connection:
committed by other transactions since the start of the read-only transaction. Committing or rolling back
the read-only transaction is semantically the same, and only indicates that the read-only transaction
should end and that a new one should be started when the next statement is executed.
+
+ **kwargs: Initial value for connection variables.
"""
- def __init__(self, instance, database=None, read_only=False):
+ def __init__(self, instance, database=None, read_only=False, **kwargs):
self._instance = instance
self._database = database
self._ddl_statements = []
@@ -117,6 +119,7 @@ def __init__(self, instance, database=None, read_only=False):
self._batch_dml_executor: BatchDmlExecutor = None
self._transaction_helper = TransactionRetryHelper(self)
self._autocommit_dml_mode: AutocommitDmlMode = AutocommitDmlMode.TRANSACTIONAL
+ self._connection_variables = kwargs
@property
def spanner_client(self):
@@ -206,6 +209,10 @@ def _client_transaction_started(self):
"""
return (not self._autocommit) or self._transaction_begin_marked
+ @property
+ def _ignore_transaction_warnings(self):
+ return self._connection_variables.get("ignore_transaction_warnings", False)
+
@property
def instance(self):
"""Instance to which this connection relates.
@@ -232,7 +239,7 @@ def read_only(self, value):
Args:
value (bool): True for ReadOnly mode, False for ReadWrite.
"""
- if self._spanner_transaction_started:
+ if self._read_only != value and self._spanner_transaction_started:
raise ValueError(
"Connection read/write mode can't be changed while a transaction is in progress. "
"Commit or rollback the current transaction and try again."
@@ -270,7 +277,7 @@ def staleness(self, value):
Args:
value (dict): Staleness type and value.
"""
- if self._spanner_transaction_started:
+ if self._spanner_transaction_started and value != self._staleness:
raise ValueError(
"`staleness` option can't be changed while a transaction is in progress. "
"Commit or rollback the current transaction and try again."
@@ -398,9 +405,10 @@ def commit(self):
if self.database is None:
raise ValueError("Database needs to be passed for this operation")
if not self._client_transaction_started:
- warnings.warn(
- CLIENT_TRANSACTION_NOT_STARTED_WARNING, UserWarning, stacklevel=2
- )
+ if not self._ignore_transaction_warnings:
+ warnings.warn(
+ CLIENT_TRANSACTION_NOT_STARTED_WARNING, UserWarning, stacklevel=2
+ )
return
self.run_prior_DDL_statements()
@@ -418,9 +426,10 @@ def rollback(self):
This is a no-op if there is no active client transaction.
"""
if not self._client_transaction_started:
- warnings.warn(
- CLIENT_TRANSACTION_NOT_STARTED_WARNING, UserWarning, stacklevel=2
- )
+ if not self._ignore_transaction_warnings:
+ warnings.warn(
+ CLIENT_TRANSACTION_NOT_STARTED_WARNING, UserWarning, stacklevel=2
+ )
return
try:
if self._spanner_transaction_started and not self._read_only:
@@ -654,6 +663,7 @@ def connect(
user_agent=None,
client=None,
route_to_leader_enabled=True,
+ **kwargs,
):
"""Creates a connection to a Google Cloud Spanner database.
@@ -696,6 +706,8 @@ def connect(
disable leader aware routing. Disabling leader aware routing would
route all requests in RW/PDML transactions to the closest region.
+ **kwargs: Initial value for connection variables.
+
:rtype: :class:`google.cloud.spanner_dbapi.connection.Connection`
:returns: Connection object associated with the given Google Cloud Spanner
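To illustrate the new connection variable added above, here is a small hedged sketch of suppressing the "transaction not started" warning through `connect()` (instance and database names are placeholders and assume existing resources):

    from google.cloud.spanner_dbapi import connect

    # Extra keyword arguments are stored as connection variables.
    conn = connect("my-instance", "my-database", ignore_transaction_warnings=True)
    conn.autocommit = True
    conn.commit()   # no UserWarning is emitted, because the variable is set
    conn.close()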
diff --git a/google/cloud/spanner_dbapi/parse_utils.py b/google/cloud/spanner_dbapi/parse_utils.py
index 403550640e..f039efe5b0 100644
--- a/google/cloud/spanner_dbapi/parse_utils.py
+++ b/google/cloud/spanner_dbapi/parse_utils.py
@@ -29,12 +29,19 @@
from .types import DateStr, TimestampStr
from .utils import sanitize_literals_for_upload
+# Note: This mapping deliberately does not contain a value for float.
+# It is better to let Spanner determine the parameter type instead of
+# specifying one explicitly: if the client specifies FLOAT64 and the column
+# that the parameter is used for is of type FLOAT32, Spanner returns an
+# error. If the client does not specify a type, Spanner automatically
+# chooses the appropriate type based on the column that the value is
+# inserted into, updated in, or compared with.
TYPES_MAP = {
bool: spanner.param_types.BOOL,
bytes: spanner.param_types.BYTES,
str: spanner.param_types.STRING,
int: spanner.param_types.INT64,
- float: spanner.param_types.FLOAT64,
datetime.datetime: spanner.param_types.TIMESTAMP,
datetime.date: spanner.param_types.DATE,
DateStr: spanner.param_types.DATE,
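As an illustration of the change above: because `float` no longer maps to an explicit FLOAT64 parameter type, dbapi sends float parameters untyped and lets Spanner infer FLOAT32 or FLOAT64 from the target column. A hedged sketch (the table, column, and connection `conn` are hypothetical):

    cursor = conn.cursor()
    # The float below is sent without an explicit param type, so Spanner
    # infers FLOAT32 for a FLOAT32 column and FLOAT64 for a FLOAT64 column.
    cursor.execute(
        "INSERT INTO measurements (id, reading) VALUES (%s, %s)",
        (1, 3.14),
    )
    conn.commit()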
diff --git a/google/cloud/spanner_v1/_helpers.py b/google/cloud/spanner_v1/_helpers.py
index a1d6a60cb0..a4d66fc20f 100644
--- a/google/cloud/spanner_v1/_helpers.py
+++ b/google/cloud/spanner_v1/_helpers.py
@@ -266,66 +266,69 @@ def _parse_value_pb(value_pb, field_type, field_name, column_info=None):
:returns: value extracted from value_pb
:raises ValueError: if unknown type is passed
"""
+ decoder = _get_type_decoder(field_type, field_name, column_info)
+ return _parse_nullable(value_pb, decoder)
+
+
+def _get_type_decoder(field_type, field_name, column_info=None):
+ """Returns a function that converts a Value protobuf to cell data.
+
+ :type field_type: :class:`~google.cloud.spanner_v1.types.Type`
+ :param field_type: type code for the value
+
+ :type field_name: str
+ :param field_name: column name
+
+ :type column_info: dict
+ :param column_info: (Optional) dict of column name and column information.
+ An object where column names as keys and custom objects as corresponding
+ values for deserialization. It's specifically useful for data types like
+ protobuf where deserialization logic is on user-specific code. When provided,
+ the custom object enables deserialization of backend-received column data.
+ If not provided, data remains serialized as bytes for Proto Messages and
+ integer for Proto Enums.
+
+ :rtype: a function that takes a single protobuf value as an input argument
+ :returns: a function that can be used to extract a value from a protobuf value
+ :raises ValueError: if unknown type is passed
+ """
+
type_code = field_type.code
- if value_pb.HasField("null_value"):
- return None
if type_code == TypeCode.STRING:
- return value_pb.string_value
+ return _parse_string
elif type_code == TypeCode.BYTES:
- return value_pb.string_value.encode("utf8")
+ return _parse_bytes
elif type_code == TypeCode.BOOL:
- return value_pb.bool_value
+ return _parse_bool
elif type_code == TypeCode.INT64:
- return int(value_pb.string_value)
+ return _parse_int64
elif type_code == TypeCode.FLOAT64:
- if value_pb.HasField("string_value"):
- return float(value_pb.string_value)
- else:
- return value_pb.number_value
+ return _parse_float
elif type_code == TypeCode.FLOAT32:
- if value_pb.HasField("string_value"):
- return float(value_pb.string_value)
- else:
- return value_pb.number_value
+ return _parse_float
elif type_code == TypeCode.DATE:
- return _date_from_iso8601_date(value_pb.string_value)
+ return _parse_date
elif type_code == TypeCode.TIMESTAMP:
- DatetimeWithNanoseconds = datetime_helpers.DatetimeWithNanoseconds
- return DatetimeWithNanoseconds.from_rfc3339(value_pb.string_value)
- elif type_code == TypeCode.ARRAY:
- return [
- _parse_value_pb(
- item_pb, field_type.array_element_type, field_name, column_info
- )
- for item_pb in value_pb.list_value.values
- ]
- elif type_code == TypeCode.STRUCT:
- return [
- _parse_value_pb(
- item_pb, field_type.struct_type.fields[i].type_, field_name, column_info
- )
- for (i, item_pb) in enumerate(value_pb.list_value.values)
- ]
+ return _parse_timestamp
elif type_code == TypeCode.NUMERIC:
- return decimal.Decimal(value_pb.string_value)
+ return _parse_numeric
elif type_code == TypeCode.JSON:
- return JsonObject.from_str(value_pb.string_value)
+ return _parse_json
elif type_code == TypeCode.PROTO:
- bytes_value = base64.b64decode(value_pb.string_value)
- if column_info is not None and column_info.get(field_name) is not None:
- default_proto_message = column_info.get(field_name)
- if isinstance(default_proto_message, Message):
- proto_message = type(default_proto_message)()
- proto_message.ParseFromString(bytes_value)
- return proto_message
- return bytes_value
+ return lambda value_pb: _parse_proto(value_pb, column_info, field_name)
elif type_code == TypeCode.ENUM:
- int_value = int(value_pb.string_value)
- if column_info is not None and column_info.get(field_name) is not None:
- proto_enum = column_info.get(field_name)
- if isinstance(proto_enum, EnumTypeWrapper):
- return proto_enum.Name(int_value)
- return int_value
+ return lambda value_pb: _parse_proto_enum(value_pb, column_info, field_name)
+ elif type_code == TypeCode.ARRAY:
+ element_decoder = _get_type_decoder(
+ field_type.array_element_type, field_name, column_info
+ )
+ return lambda value_pb: _parse_array(value_pb, element_decoder)
+ elif type_code == TypeCode.STRUCT:
+ element_decoders = [
+ _get_type_decoder(item_field.type_, field_name, column_info)
+ for item_field in field_type.struct_type.fields
+ ]
+ return lambda value_pb: _parse_struct(value_pb, element_decoders)
else:
raise ValueError("Unknown type: %s" % (field_type,))
@@ -351,6 +354,87 @@ def _parse_list_value_pbs(rows, row_type):
return result
+def _parse_string(value_pb) -> str:
+ return value_pb.string_value
+
+
+def _parse_bytes(value_pb):
+ return value_pb.string_value.encode("utf8")
+
+
+def _parse_bool(value_pb) -> bool:
+ return value_pb.bool_value
+
+
+def _parse_int64(value_pb) -> int:
+ return int(value_pb.string_value)
+
+
+def _parse_float(value_pb) -> float:
+ if value_pb.HasField("string_value"):
+ return float(value_pb.string_value)
+ else:
+ return value_pb.number_value
+
+
+def _parse_date(value_pb):
+ return _date_from_iso8601_date(value_pb.string_value)
+
+
+def _parse_timestamp(value_pb):
+ DatetimeWithNanoseconds = datetime_helpers.DatetimeWithNanoseconds
+ return DatetimeWithNanoseconds.from_rfc3339(value_pb.string_value)
+
+
+def _parse_numeric(value_pb):
+ return decimal.Decimal(value_pb.string_value)
+
+
+def _parse_json(value_pb):
+ return JsonObject.from_str(value_pb.string_value)
+
+
+def _parse_proto(value_pb, column_info, field_name):
+ bytes_value = base64.b64decode(value_pb.string_value)
+ if column_info is not None and column_info.get(field_name) is not None:
+ default_proto_message = column_info.get(field_name)
+ if isinstance(default_proto_message, Message):
+ proto_message = type(default_proto_message)()
+ proto_message.ParseFromString(bytes_value)
+ return proto_message
+ return bytes_value
+
+
+def _parse_proto_enum(value_pb, column_info, field_name):
+ int_value = int(value_pb.string_value)
+ if column_info is not None and column_info.get(field_name) is not None:
+ proto_enum = column_info.get(field_name)
+ if isinstance(proto_enum, EnumTypeWrapper):
+ return proto_enum.Name(int_value)
+ return int_value
+
+
+def _parse_array(value_pb, element_decoder) -> []:
+ return [
+ _parse_nullable(item_pb, element_decoder)
+ for item_pb in value_pb.list_value.values
+ ]
+
+
+def _parse_struct(value_pb, element_decoders):
+ return [
+ _parse_nullable(item_pb, element_decoders[i])
+ for (i, item_pb) in enumerate(value_pb.list_value.values)
+ ]
+
+
+def _parse_nullable(value_pb, decoder):
+ if value_pb.HasField("null_value"):
+ return None
+ else:
+ return decoder(value_pb)
+
+
class _SessionWrapper(object):
"""Base class for objects wrapping a session.
diff --git a/google/cloud/spanner_v1/_opentelemetry_tracing.py b/google/cloud/spanner_v1/_opentelemetry_tracing.py
index 51501a07a3..efbeea05e7 100644
--- a/google/cloud/spanner_v1/_opentelemetry_tracing.py
+++ b/google/cloud/spanner_v1/_opentelemetry_tracing.py
@@ -15,6 +15,8 @@
"""Manages OpenTelemetry trace creation and handling"""
from contextlib import contextmanager
+from datetime import datetime
+import os
from google.cloud.spanner_v1 import SpannerClient
from google.cloud.spanner_v1 import gapic_version
@@ -33,6 +35,9 @@
TRACER_NAME = "cloud.google.com/python/spanner"
TRACER_VERSION = gapic_version.__version__
+extended_tracing_globally_disabled = (
+ os.getenv("SPANNER_ENABLE_EXTENDED_TRACING", "").lower() == "false"
+)
def get_tracer(tracer_provider=None):
@@ -51,13 +56,29 @@ def get_tracer(tracer_provider=None):
@contextmanager
-def trace_call(name, session, extra_attributes=None):
+def trace_call(name, session, extra_attributes=None, observability_options=None):
+ if session:
+ session._last_use_time = datetime.now()
+
if not HAS_OPENTELEMETRY_INSTALLED or not session:
# Empty context manager. Users will have to check if the generated value is None or a span
yield None
return
- tracer = get_tracer()
+ tracer_provider = None
+
+    # enable_extended_tracing defaults to True to preserve legacy behavior
+    # and minimize breaking changes.
+ enable_extended_tracing = True
+
+ if isinstance(observability_options, dict): # Avoid false positives with mock.Mock
+ tracer_provider = observability_options.get("tracer_provider", None)
+ enable_extended_tracing = observability_options.get(
+ "enable_extended_tracing", enable_extended_tracing
+ )
+
+ tracer = get_tracer(tracer_provider)
# Set base attributes that we know for every trace created
attributes = {
@@ -72,6 +93,12 @@ def trace_call(name, session, extra_attributes=None):
if extra_attributes:
attributes.update(extra_attributes)
+ if extended_tracing_globally_disabled:
+ enable_extended_tracing = False
+
+ if not enable_extended_tracing:
+ attributes.pop("db.statement", False)
+
with tracer.start_as_current_span(
name, kind=trace.SpanKind.CLIENT, attributes=attributes
) as span:
diff --git a/google/cloud/spanner_v1/batch.py b/google/cloud/spanner_v1/batch.py
index e3d681189c..948740d7d4 100644
--- a/google/cloud/spanner_v1/batch.py
+++ b/google/cloud/spanner_v1/batch.py
@@ -205,7 +205,13 @@ def commit(
max_commit_delay=max_commit_delay,
request_options=request_options,
)
- with trace_call("CloudSpanner.Commit", self._session, trace_attributes):
+ observability_options = getattr(database, "observability_options", None)
+ with trace_call(
+ "CloudSpanner.Commit",
+ self._session,
+ trace_attributes,
+ observability_options=observability_options,
+ ):
method = functools.partial(
api.commit,
request=request,
@@ -318,7 +324,13 @@ def batch_write(self, request_options=None, exclude_txn_from_change_streams=Fals
request_options=request_options,
exclude_txn_from_change_streams=exclude_txn_from_change_streams,
)
- with trace_call("CloudSpanner.BatchWrite", self._session, trace_attributes):
+ observability_options = getattr(database, "observability_options", None)
+ with trace_call(
+ "CloudSpanner.BatchWrite",
+ self._session,
+ trace_attributes,
+ observability_options=observability_options,
+ ):
method = functools.partial(
api.batch_write,
request=request,
diff --git a/google/cloud/spanner_v1/client.py b/google/cloud/spanner_v1/client.py
index f8f3fdb72c..afe6264717 100644
--- a/google/cloud/spanner_v1/client.py
+++ b/google/cloud/spanner_v1/client.py
@@ -126,6 +126,16 @@ class Client(ClientWithProject):
for all ReadRequests and ExecuteSqlRequests that indicates which replicas
or regions should be used for non-transactional reads or queries.
+ :type observability_options: dict (str -> any) or None
+ :param observability_options: (Optional) the configuration to control
+ the tracer's behavior.
+        ``tracer_provider``: the injected tracer provider to use for spans.
+        ``enable_extended_tracing`` (bool): when set to ``True``, spans that
+        issue SQL statements are annotated with the SQL statement.
+        Defaults to ``True``; set it to ``False`` to turn it off, or use the
+        environment variable ``SPANNER_ENABLE_EXTENDED_TRACING=false`` to
+        disable it globally.
+
:raises: :class:`ValueError ` if both ``read_only``
and ``admin`` are :data:`True`
"""
@@ -146,6 +156,7 @@ def __init__(
query_options=None,
route_to_leader_enabled=True,
directed_read_options=None,
+ observability_options=None,
):
self._emulator_host = _get_spanner_emulator_host()
@@ -187,6 +198,7 @@ def __init__(
self._route_to_leader_enabled = route_to_leader_enabled
self._directed_read_options = directed_read_options
+ self._observability_options = observability_options
@property
def credentials(self):
@@ -268,6 +280,15 @@ def route_to_leader_enabled(self):
"""
return self._route_to_leader_enabled
+ @property
+ def observability_options(self):
+ """Getter for observability_options.
+
+ :rtype: dict
+ :returns: The configured observability_options if set.
+ """
+ return self._observability_options
+
@property
def directed_read_options(self):
"""Getter for directed_read_options.
diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py
index f6c4ceb667..1e10e1df73 100644
--- a/google/cloud/spanner_v1/database.py
+++ b/google/cloud/spanner_v1/database.py
@@ -142,7 +142,7 @@ class Database(object):
statements in 'ddl_statements' above.
"""
- _spanner_api = None
+ _spanner_api: SpannerClient = None
def __init__(
self,
@@ -718,6 +718,7 @@ def execute_pdml():
method=method,
request=request,
transaction_selector=txn_selector,
+ observability_options=self.observability_options,
)
result_set = StreamedResultSet(iterator)
@@ -1106,6 +1107,17 @@ def set_iam_policy(self, policy):
response = api.set_iam_policy(request=request, metadata=metadata)
return response
+ @property
+ def observability_options(self):
+ """
+ Returns the observability options that you set when creating
+ the SpannerClient.
+ """
+ if not (self._instance and self._instance._client):
+ return None
+
+ return getattr(self._instance._client, "observability_options", None)
+
class BatchCheckout(object):
"""Context manager for using a batch from a database.
diff --git a/google/cloud/spanner_v1/gapic_version.py b/google/cloud/spanner_v1/gapic_version.py
index 873057e050..99e11c0cb5 100644
--- a/google/cloud/spanner_v1/gapic_version.py
+++ b/google/cloud/spanner_v1/gapic_version.py
@@ -13,4 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-__version__ = "3.50.1" # {x-release-please-version}
+__version__ = "3.51.0" # {x-release-please-version}
diff --git a/google/cloud/spanner_v1/pool.py b/google/cloud/spanner_v1/pool.py
index 56837bfc0b..c95ef7a7b9 100644
--- a/google/cloud/spanner_v1/pool.py
+++ b/google/cloud/spanner_v1/pool.py
@@ -145,7 +145,8 @@ class FixedSizePool(AbstractSessionPool):
- Pre-allocates / creates a fixed number of sessions.
- "Pings" existing sessions via :meth:`session.exists` before returning
- them, and replaces expired sessions.
+      them if they have not been used for more than 55 minutes, and replaces
+      expired sessions.
- Blocks, with a timeout, when :meth:`get` is called on an empty pool.
Raises after timing out.
@@ -171,6 +172,7 @@ class FixedSizePool(AbstractSessionPool):
DEFAULT_SIZE = 10
DEFAULT_TIMEOUT = 10
+ DEFAULT_MAX_AGE_MINUTES = 55
def __init__(
self,
@@ -178,11 +180,13 @@ def __init__(
default_timeout=DEFAULT_TIMEOUT,
labels=None,
database_role=None,
+ max_age_minutes=DEFAULT_MAX_AGE_MINUTES,
):
super(FixedSizePool, self).__init__(labels=labels, database_role=database_role)
self.size = size
self.default_timeout = default_timeout
self._sessions = queue.LifoQueue(size)
+ self._max_age = datetime.timedelta(minutes=max_age_minutes)
def bind(self, database):
"""Associate the pool with a database.
@@ -230,8 +234,9 @@ def get(self, timeout=None):
timeout = self.default_timeout
session = self._sessions.get(block=True, timeout=timeout)
+ age = _NOW() - session.last_use_time
- if not session.exists():
+ if age >= self._max_age and not session.exists():
session = self._database.session()
session.create()
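For context, configuring the new `max_age_minutes` knob looks roughly like the sketch below (project, instance, and database names are placeholders):

    from google.cloud import spanner
    from google.cloud.spanner_v1.pool import FixedSizePool

    client = spanner.Client("my-project")
    instance = client.instance("my-instance")
    # Sessions idle for less than 30 minutes are returned without a GetSession ping.
    pool = FixedSizePool(size=10, max_age_minutes=30)
    database = instance.database("my-database", pool=pool)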
diff --git a/google/cloud/spanner_v1/session.py b/google/cloud/spanner_v1/session.py
index 28280282f4..539f36af2b 100644
--- a/google/cloud/spanner_v1/session.py
+++ b/google/cloud/spanner_v1/session.py
@@ -17,6 +17,7 @@
from functools import total_ordering
import random
import time
+from datetime import datetime
from google.api_core.exceptions import Aborted
from google.api_core.exceptions import GoogleAPICallError
@@ -69,6 +70,7 @@ def __init__(self, database, labels=None, database_role=None):
labels = {}
self._labels = labels
self._database_role = database_role
+ self._last_use_time = datetime.utcnow()
def __lt__(self, other):
return self._session_id < other._session_id
@@ -78,6 +80,14 @@ def session_id(self):
"""Read-only ID, set by the back-end during :meth:`create`."""
return self._session_id
+ @property
+ def last_use_time(self):
+        """Approximate last use time of this session
+
+ :rtype: datetime
+ :returns: the approximate last use time of this session"""
+ return self._last_use_time
+
@property
def database_role(self):
"""User-assigned database-role for the session.
@@ -142,7 +152,13 @@ def create(self):
if self._labels:
request.session.labels = self._labels
- with trace_call("CloudSpanner.CreateSession", self, self._labels):
+ observability_options = getattr(self._database, "observability_options", None)
+ with trace_call(
+ "CloudSpanner.CreateSession",
+ self,
+ self._labels,
+ observability_options=observability_options,
+ ):
session_pb = api.create_session(
request=request,
metadata=metadata,
@@ -169,7 +185,10 @@ def exists(self):
)
)
- with trace_call("CloudSpanner.GetSession", self) as span:
+ observability_options = getattr(self._database, "observability_options", None)
+ with trace_call(
+ "CloudSpanner.GetSession", self, observability_options=observability_options
+ ) as span:
try:
api.get_session(name=self.name, metadata=metadata)
if span:
@@ -194,7 +213,12 @@ def delete(self):
raise ValueError("Session ID not set by back-end")
api = self._database.spanner_api
metadata = _metadata_with_prefix(self._database.name)
- with trace_call("CloudSpanner.DeleteSession", self):
+ observability_options = getattr(self._database, "observability_options", None)
+ with trace_call(
+ "CloudSpanner.DeleteSession",
+ self,
+ observability_options=observability_options,
+ ):
api.delete_session(name=self.name, metadata=metadata)
def ping(self):
@@ -208,6 +232,7 @@ def ping(self):
metadata = _metadata_with_prefix(self._database.name)
request = ExecuteSqlRequest(session=self.name, sql="SELECT 1")
api.execute_sql(request=request, metadata=metadata)
+ self._last_use_time = datetime.now()
def snapshot(self, **kw):
"""Create a snapshot to perform a set of reads with shared staleness.
diff --git a/google/cloud/spanner_v1/snapshot.py b/google/cloud/spanner_v1/snapshot.py
index 3bc1a746bd..89b5094706 100644
--- a/google/cloud/spanner_v1/snapshot.py
+++ b/google/cloud/spanner_v1/snapshot.py
@@ -14,6 +14,7 @@
"""Model a set of read-only queries to a database as a snapshot."""
+from datetime import datetime
import functools
import threading
from google.protobuf.struct_pb2 import Struct
@@ -56,6 +57,7 @@ def _restart_on_unavailable(
attributes=None,
transaction=None,
transaction_selector=None,
+ observability_options=None,
):
"""Restart iteration after :exc:`.ServiceUnavailable`.
@@ -84,7 +86,10 @@ def _restart_on_unavailable(
)
request.transaction = transaction_selector
- with trace_call(trace_name, session, attributes):
+
+ with trace_call(
+ trace_name, session, attributes, observability_options=observability_options
+ ):
iterator = method(request=request)
while True:
try:
@@ -104,7 +109,12 @@ def _restart_on_unavailable(
break
except ServiceUnavailable:
del item_buffer[:]
- with trace_call(trace_name, session, attributes):
+ with trace_call(
+ trace_name,
+ session,
+ attributes,
+ observability_options=observability_options,
+ ):
request.resume_token = resume_token
if transaction is not None:
transaction_selector = transaction._make_txn_selector()
@@ -119,7 +129,12 @@ def _restart_on_unavailable(
if not resumable_error:
raise
del item_buffer[:]
- with trace_call(trace_name, session, attributes):
+ with trace_call(
+ trace_name,
+ session,
+ attributes,
+ observability_options=observability_options,
+ ):
request.resume_token = resume_token
if transaction is not None:
transaction_selector = transaction._make_txn_selector()
@@ -178,6 +193,7 @@ def read(
retry=gapic_v1.method.DEFAULT,
timeout=gapic_v1.method.DEFAULT,
column_info=None,
+ lazy_decode=False,
):
"""Perform a ``StreamingRead`` API request for rows in a table.
@@ -241,6 +257,18 @@ def read(
If not provided, data remains serialized as bytes for Proto Messages and
integer for Proto Enums.
+ :type lazy_decode: bool
+ :param lazy_decode:
+ (Optional) If this argument is set to ``true``, the iterator
+ returns the underlying protobuf values instead of decoded Python
+ objects. This reduces the time that is needed to iterate through
+ large result sets. The application is responsible for decoding
+ the data that is needed. The returned row iterator contains two
+ functions that can be used for this. ``iterator.decode_row(row)``
+ decodes all the columns in the given row to an array of Python
+ objects. ``iterator.decode_column(row, column_index)`` decodes one
+ specific column in the given row.
+
:rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet`
:returns: a result set instance which can be used to consume rows.
@@ -299,6 +327,7 @@ def read(
)
trace_attributes = {"table_id": table, "columns": columns}
+ observability_options = getattr(database, "observability_options", None)
if self._transaction_id is None:
# lock is added to handle the inline begin for first rpc
@@ -310,14 +339,20 @@ def read(
self._session,
trace_attributes,
transaction=self,
+ observability_options=observability_options,
)
self._read_request_count += 1
if self._multi_use:
return StreamedResultSet(
- iterator, source=self, column_info=column_info
+ iterator,
+ source=self,
+ column_info=column_info,
+ lazy_decode=lazy_decode,
)
else:
- return StreamedResultSet(iterator, column_info=column_info)
+ return StreamedResultSet(
+ iterator, column_info=column_info, lazy_decode=lazy_decode
+ )
else:
iterator = _restart_on_unavailable(
restart,
@@ -326,14 +361,20 @@ def read(
self._session,
trace_attributes,
transaction=self,
+ observability_options=observability_options,
)
self._read_request_count += 1
+ self._session._last_use_time = datetime.now()
if self._multi_use:
- return StreamedResultSet(iterator, source=self, column_info=column_info)
+ return StreamedResultSet(
+ iterator, source=self, column_info=column_info, lazy_decode=lazy_decode
+ )
else:
- return StreamedResultSet(iterator, column_info=column_info)
+ return StreamedResultSet(
+ iterator, column_info=column_info, lazy_decode=lazy_decode
+ )
def execute_sql(
self,
@@ -349,6 +390,7 @@ def execute_sql(
data_boost_enabled=False,
directed_read_options=None,
column_info=None,
+ lazy_decode=False,
):
"""Perform an ``ExecuteStreamingSql`` API request.
@@ -421,6 +463,18 @@ def execute_sql(
If not provided, data remains serialized as bytes for Proto Messages and
integer for Proto Enums.
+ :type lazy_decode: bool
+ :param lazy_decode:
+ (Optional) If this argument is set to ``true``, the iterator
+ returns the underlying protobuf values instead of decoded Python
+ objects. This reduces the time that is needed to iterate through
+ large result sets. The application is responsible for decoding
+ the data that is needed. The returned row iterator contains two
+ functions that can be used for this. ``iterator.decode_row(row)``
+ decodes all the columns in the given row to an array of Python
+ objects. ``iterator.decode_column(row, column_index)`` decodes one
+ specific column in the given row.
+
:raises ValueError:
for reuse of single-use snapshots, or if a transaction ID is
already pending for multiple-use snapshots.
@@ -489,19 +543,38 @@ def execute_sql(
)
trace_attributes = {"db.statement": sql}
+ observability_options = getattr(database, "observability_options", None)
if self._transaction_id is None:
# lock is added to handle the inline begin for first rpc
with self._lock:
return self._get_streamed_result_set(
- restart, request, trace_attributes, column_info
+ restart,
+ request,
+ trace_attributes,
+ column_info,
+ observability_options,
+ lazy_decode=lazy_decode,
)
else:
return self._get_streamed_result_set(
- restart, request, trace_attributes, column_info
+ restart,
+ request,
+ trace_attributes,
+ column_info,
+ observability_options,
+ lazy_decode=lazy_decode,
)
- def _get_streamed_result_set(self, restart, request, trace_attributes, column_info):
+ def _get_streamed_result_set(
+ self,
+ restart,
+ request,
+ trace_attributes,
+ column_info,
+ observability_options=None,
+ lazy_decode=False,
+ ):
iterator = _restart_on_unavailable(
restart,
request,
@@ -509,14 +582,19 @@ def _get_streamed_result_set(self, restart, request, trace_attributes, column_in
self._session,
trace_attributes,
transaction=self,
+ observability_options=observability_options,
)
self._read_request_count += 1
self._execute_sql_count += 1
if self._multi_use:
- return StreamedResultSet(iterator, source=self, column_info=column_info)
+ return StreamedResultSet(
+ iterator, source=self, column_info=column_info, lazy_decode=lazy_decode
+ )
else:
- return StreamedResultSet(iterator, column_info=column_info)
+ return StreamedResultSet(
+ iterator, column_info=column_info, lazy_decode=lazy_decode
+ )
def partition_read(
self,
@@ -598,7 +676,10 @@ def partition_read(
trace_attributes = {"table_id": table, "columns": columns}
with trace_call(
- "CloudSpanner.PartitionReadOnlyTransaction", self._session, trace_attributes
+ "CloudSpanner.PartitionReadOnlyTransaction",
+ self._session,
+ trace_attributes,
+ observability_options=getattr(database, "observability_options", None),
):
method = functools.partial(
api.partition_read,
@@ -701,6 +782,7 @@ def partition_query(
"CloudSpanner.PartitionReadWriteTransaction",
self._session,
trace_attributes,
+ observability_options=getattr(database, "observability_options", None),
):
method = functools.partial(
api.partition_query,
@@ -843,7 +925,11 @@ def begin(self):
(_metadata_with_leader_aware_routing(database._route_to_leader_enabled))
)
txn_selector = self._make_txn_selector()
- with trace_call("CloudSpanner.BeginTransaction", self._session):
+ with trace_call(
+ "CloudSpanner.BeginTransaction",
+ self._session,
+ observability_options=getattr(database, "observability_options", None),
+ ):
method = functools.partial(
api.begin_transaction,
session=self._session.name,
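A short sketch of consuming a query with the new `lazy_decode` flag and decoding rows on demand (the SQL and column layout are placeholders; `database` is assumed to exist):

    with database.snapshot() as snapshot:
        results = snapshot.execute_sql(
            "SELECT SingerId, FirstName FROM Singers", lazy_decode=True
        )
        for row in results:
            # Each row is a list of raw protobuf values; decode only what is needed.
            singer_id = results.decode_column(row, 0)
            full_row = results.decode_row(row)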
diff --git a/google/cloud/spanner_v1/streamed.py b/google/cloud/spanner_v1/streamed.py
index 89bde0e334..7c067e97b6 100644
--- a/google/cloud/spanner_v1/streamed.py
+++ b/google/cloud/spanner_v1/streamed.py
@@ -21,7 +21,7 @@
from google.cloud.spanner_v1 import PartialResultSet
from google.cloud.spanner_v1 import ResultSetMetadata
from google.cloud.spanner_v1 import TypeCode
-from google.cloud.spanner_v1._helpers import _parse_value_pb
+from google.cloud.spanner_v1._helpers import _get_type_decoder, _parse_nullable
class StreamedResultSet(object):
@@ -37,7 +37,13 @@ class StreamedResultSet(object):
:param source: Snapshot from which the result set was fetched.
"""
- def __init__(self, response_iterator, source=None, column_info=None):
+ def __init__(
+ self,
+ response_iterator,
+ source=None,
+ column_info=None,
+ lazy_decode: bool = False,
+ ):
self._response_iterator = response_iterator
self._rows = [] # Fully-processed rows
self._metadata = None # Until set from first PRS
@@ -46,6 +52,8 @@ def __init__(self, response_iterator, source=None, column_info=None):
self._pending_chunk = None # Incomplete value
self._source = source # Source snapshot
self._column_info = column_info # Column information
+ self._field_decoders = None
+ self._lazy_decode = lazy_decode # Return protobuf values
@property
def fields(self):
@@ -77,6 +85,17 @@ def stats(self):
"""
return self._stats
+ @property
+ def _decoders(self):
+ if self._field_decoders is None:
+ if self._metadata is None:
+ raise ValueError("iterator not started")
+ self._field_decoders = [
+ _get_type_decoder(field.type_, field.name, self._column_info)
+ for field in self.fields
+ ]
+ return self._field_decoders
+
def _merge_chunk(self, value):
"""Merge pending chunk with next value.
@@ -99,16 +118,14 @@ def _merge_values(self, values):
:type values: list of :class:`~google.protobuf.struct_pb2.Value`
:param values: non-chunked values from partial result set.
"""
- field_types = [field.type_ for field in self.fields]
- field_names = [field.name for field in self.fields]
- width = len(field_types)
+ decoders = self._decoders
+ width = len(self.fields)
index = len(self._current_row)
for value in values:
- self._current_row.append(
- _parse_value_pb(
- value, field_types[index], field_names[index], self._column_info
- )
- )
+ if self._lazy_decode:
+ self._current_row.append(value)
+ else:
+ self._current_row.append(_parse_nullable(value, decoders[index]))
index += 1
if index == width:
self._rows.append(self._current_row)
@@ -152,6 +169,34 @@ def __iter__(self):
except StopIteration:
return
+ def decode_row(self, row: []) -> []:
+ """Decodes a row from protobuf values to Python objects. This function
+        should only be called for result sets that use ``lazy_decode=True``.
+        The array that is returned by this function is the same as the array
+        that would have been returned by the rows iterator if ``lazy_decode=False``.
+
+ :returns: an array containing the decoded values of all the columns in the given row
+ """
+ if not hasattr(row, "__len__"):
+ raise TypeError("row", "row must be an array of protobuf values")
+ decoders = self._decoders
+ return [
+ _parse_nullable(row[index], decoders[index]) for index in range(len(row))
+ ]
+
+ def decode_column(self, row: [], column_index: int):
+ """Decodes a column from a protobuf value to a Python object. This function
+        should only be called for result sets that use ``lazy_decode=True``.
+        The object that is returned by this function is the same as the object
+        that would have been returned by the rows iterator if ``lazy_decode=False``.
+
+ :returns: the decoded column value
+ """
+ if not hasattr(row, "__len__"):
+ raise TypeError("row", "row must be an array of protobuf values")
+ decoders = self._decoders
+ return _parse_nullable(row[column_index], decoders[column_index])
+
def one(self):
"""Return exactly one result, or raise an exception.
diff --git a/google/cloud/spanner_v1/testing/__init__.py b/google/cloud/spanner_v1/testing/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/google/cloud/spanner_v1/testing/mock_database_admin.py b/google/cloud/spanner_v1/testing/mock_database_admin.py
new file mode 100644
index 0000000000..a9b4eb6392
--- /dev/null
+++ b/google/cloud/spanner_v1/testing/mock_database_admin.py
@@ -0,0 +1,38 @@
+# Copyright 2024 Google LLC All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from google.longrunning import operations_pb2 as operations_pb2
+from google.protobuf import empty_pb2
+import google.cloud.spanner_v1.testing.spanner_database_admin_pb2_grpc as database_admin_grpc
+
+
+# An in-memory mock DatabaseAdmin server that can be used for testing.
+class DatabaseAdminServicer(database_admin_grpc.DatabaseAdminServicer):
+ def __init__(self):
+ self._requests = []
+
+ @property
+ def requests(self):
+ return self._requests
+
+ def clear_requests(self):
+ self._requests = []
+
+ def UpdateDatabaseDdl(self, request, context):
+ self._requests.append(request)
+ operation = operations_pb2.Operation()
+ operation.done = True
+ operation.name = "projects/test-project/operations/test-operation"
+ operation.response.Pack(empty_pb2.Empty())
+ return operation
diff --git a/google/cloud/spanner_v1/testing/mock_spanner.py b/google/cloud/spanner_v1/testing/mock_spanner.py
new file mode 100644
index 0000000000..d01c63aff5
--- /dev/null
+++ b/google/cloud/spanner_v1/testing/mock_spanner.py
@@ -0,0 +1,216 @@
+# Copyright 2024 Google LLC All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import base64
+import grpc
+from concurrent import futures
+
+from google.protobuf import empty_pb2
+from google.cloud.spanner_v1.testing.mock_database_admin import DatabaseAdminServicer
+import google.cloud.spanner_v1.testing.spanner_database_admin_pb2_grpc as database_admin_grpc
+import google.cloud.spanner_v1.testing.spanner_pb2_grpc as spanner_grpc
+import google.cloud.spanner_v1.types.commit_response as commit
+import google.cloud.spanner_v1.types.result_set as result_set
+import google.cloud.spanner_v1.types.spanner as spanner
+import google.cloud.spanner_v1.types.transaction as transaction
+
+
+class MockSpanner:
+ def __init__(self):
+ self.results = {}
+
+ def add_result(self, sql: str, result: result_set.ResultSet):
+ self.results[sql.lower().strip()] = result
+
+ def get_result(self, sql: str) -> result_set.ResultSet:
+ result = self.results.get(sql.lower().strip())
+ if result is None:
+ raise ValueError(f"No result found for {sql}")
+ return result
+
+ def get_result_as_partial_result_sets(
+ self, sql: str
+ ) -> [result_set.PartialResultSet]:
+ result: result_set.ResultSet = self.get_result(sql)
+ partials = []
+ first = True
+ if len(result.rows) == 0:
+ partial = result_set.PartialResultSet()
+ partial.metadata = result.metadata
+ partials.append(partial)
+ else:
+ for row in result.rows:
+ partial = result_set.PartialResultSet()
+ if first:
+ partial.metadata = result.metadata
+ partial.values.extend(row)
+ partials.append(partial)
+ partials[len(partials) - 1].stats = result.stats
+ return partials
+
+
+# An in-memory mock Spanner server that can be used for testing.
+class SpannerServicer(spanner_grpc.SpannerServicer):
+ def __init__(self):
+ self._requests = []
+ self.session_counter = 0
+ self.sessions = {}
+ self.transaction_counter = 0
+ self.transactions = {}
+ self._mock_spanner = MockSpanner()
+
+ @property
+ def mock_spanner(self):
+ return self._mock_spanner
+
+ @property
+ def requests(self):
+ return self._requests
+
+ def clear_requests(self):
+ self._requests = []
+
+ def CreateSession(self, request, context):
+ self._requests.append(request)
+ return self.__create_session(request.database, request.session)
+
+ def BatchCreateSessions(self, request, context):
+ self._requests.append(request)
+ sessions = []
+ for i in range(request.session_count):
+ sessions.append(
+ self.__create_session(request.database, request.session_template)
+ )
+ return spanner.BatchCreateSessionsResponse(dict(session=sessions))
+
+ def __create_session(self, database: str, session_template: spanner.Session):
+ self.session_counter += 1
+ session = spanner.Session()
+ session.name = database + "/sessions/" + str(self.session_counter)
+ session.multiplexed = session_template.multiplexed
+ session.labels.MergeFrom(session_template.labels)
+ session.creator_role = session_template.creator_role
+ self.sessions[session.name] = session
+ return session
+
+ def GetSession(self, request, context):
+ self._requests.append(request)
+ return spanner.Session()
+
+ def ListSessions(self, request, context):
+ self._requests.append(request)
+ return [spanner.Session()]
+
+ def DeleteSession(self, request, context):
+ self._requests.append(request)
+ return empty_pb2.Empty()
+
+ def ExecuteSql(self, request, context):
+ self._requests.append(request)
+ return result_set.ResultSet()
+
+ def ExecuteStreamingSql(self, request, context):
+ self._requests.append(request)
+ partials = self.mock_spanner.get_result_as_partial_result_sets(request.sql)
+ for result in partials:
+ yield result
+
+ def ExecuteBatchDml(self, request, context):
+ self._requests.append(request)
+ response = spanner.ExecuteBatchDmlResponse()
+ started_transaction = None
+ if not request.transaction.begin == transaction.TransactionOptions():
+ started_transaction = self.__create_transaction(
+ request.session, request.transaction.begin
+ )
+ first = True
+ for statement in request.statements:
+ result = self.mock_spanner.get_result(statement.sql)
+ if first and started_transaction is not None:
+ result = result_set.ResultSet(
+ self.mock_spanner.get_result(statement.sql)
+ )
+ result.metadata = result_set.ResultSetMetadata(result.metadata)
+ result.metadata.transaction = started_transaction
+ response.result_sets.append(result)
+ return response
+
+ def Read(self, request, context):
+ self._requests.append(request)
+ return result_set.ResultSet()
+
+ def StreamingRead(self, request, context):
+ self._requests.append(request)
+ for result in [result_set.PartialResultSet(), result_set.PartialResultSet()]:
+ yield result
+
+ def BeginTransaction(self, request, context):
+ self._requests.append(request)
+ return self.__create_transaction(request.session, request.options)
+
+ def __create_transaction(
+ self, session: str, options: transaction.TransactionOptions
+ ) -> transaction.Transaction:
+        session_name = session
+        session = self.sessions.get(session_name)
+        if session is None:
+            raise ValueError(f"Session not found: {session_name}")
+ self.transaction_counter += 1
+ id_bytes = bytes(
+ f"{session.name}/transactions/{self.transaction_counter}", "UTF-8"
+ )
+ transaction_id = base64.urlsafe_b64encode(id_bytes)
+ self.transactions[transaction_id] = options
+ return transaction.Transaction(dict(id=transaction_id))
+
+ def Commit(self, request, context):
+ self._requests.append(request)
+        tx = self.transactions.get(request.transaction_id)
+        if tx is None:
+            raise ValueError(f"Transaction not found: {request.transaction_id}")
+ del self.transactions[request.transaction_id]
+ return commit.CommitResponse()
+
+ def Rollback(self, request, context):
+ self._requests.append(request)
+ return empty_pb2.Empty()
+
+ def PartitionQuery(self, request, context):
+ self._requests.append(request)
+ return spanner.PartitionResponse()
+
+ def PartitionRead(self, request, context):
+ self._requests.append(request)
+ return spanner.PartitionResponse()
+
+ def BatchWrite(self, request, context):
+ self._requests.append(request)
+ for result in [spanner.BatchWriteResponse(), spanner.BatchWriteResponse()]:
+ yield result
+
+
+def start_mock_server() -> (grpc.Server, SpannerServicer, DatabaseAdminServicer, int):
+ # Create a gRPC server.
+ spanner_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+
+ # Add the Spanner services to the gRPC server.
+ spanner_servicer = SpannerServicer()
+ spanner_grpc.add_SpannerServicer_to_server(spanner_servicer, spanner_server)
+ database_admin_servicer = DatabaseAdminServicer()
+ database_admin_grpc.add_DatabaseAdminServicer_to_server(
+ database_admin_servicer, spanner_server
+ )
+
+ # Start the server on a random port.
+ port = spanner_server.add_insecure_port("[::]:0")
+ spanner_server.start()
+ return spanner_server, spanner_servicer, database_admin_servicer, port
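A rough sketch of pointing a client at the in-memory server from a test. Routing through the SPANNER_EMULATOR_HOST environment variable (which makes the client open an insecure local channel) is an assumption for brevity; the actual test helpers may wire the gRPC transport explicitly:

    import os
    from google.cloud.spanner_v1 import Client, ResultSet
    from google.cloud.spanner_v1.testing.mock_spanner import start_mock_server

    server, spanner_servicer, database_admin_servicer, port = start_mock_server()
    # Assumption: emulator-style routing gives us an insecure channel to the mock.
    os.environ["SPANNER_EMULATOR_HOST"] = f"localhost:{port}"

    client = Client(project="test-project")
    instance = client.instance("test-instance")
    database = instance.database("test-db")

    # Register a canned result for a statement before the test executes it.
    spanner_servicer.mock_spanner.add_result("select 1", ResultSet())

    server.stop(grace=None)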
diff --git a/google/cloud/spanner_v1/testing/spanner_database_admin_pb2_grpc.py b/google/cloud/spanner_v1/testing/spanner_database_admin_pb2_grpc.py
new file mode 100644
index 0000000000..fdc26b30ad
--- /dev/null
+++ b/google/cloud/spanner_v1/testing/spanner_database_admin_pb2_grpc.py
@@ -0,0 +1,1267 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+
+
+# Generated with the following commands:
+#
+# pip install grpcio-tools
+# git clone git@github.com:googleapis/googleapis.git
+# cd googleapis
+# python -m grpc_tools.protoc \
+# -I . \
+# --python_out=. --pyi_out=. --grpc_python_out=. \
+# ./google/spanner/admin/database/v1/*.proto
+
+"""Client and server classes corresponding to protobuf-defined services."""
+
+import grpc
+from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2
+from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2
+from google.longrunning import (
+ operations_pb2 as google_dot_longrunning_dot_operations__pb2,
+)
+from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
+from google.cloud.spanner_admin_database_v1.types import (
+ backup as google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2,
+)
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2,
+)
+from google.cloud.spanner_admin_database_v1.types import (
+ spanner_database_admin as google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2,
+)
+
+GRPC_GENERATED_VERSION = "1.67.0"
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+
+ _version_not_supported = first_version_is_lower(
+ GRPC_VERSION, GRPC_GENERATED_VERSION
+ )
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f"The grpc package installed is at version {GRPC_VERSION},"
+ + " but the generated code in google/spanner/admin/database/v1/spanner_database_admin_pb2_grpc.py depends on"
+ + f" grpcio>={GRPC_GENERATED_VERSION}."
+ + f" Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}"
+ + f" or downgrade your generated code using grpcio-tools<={GRPC_VERSION}."
+ )
+
+
+class DatabaseAdminServicer(object):
+ """Cloud Spanner Database Admin API
+
+ The Cloud Spanner Database Admin API can be used to:
+ * create, drop, and list databases
+ * update the schema of pre-existing databases
+ * create, delete, copy and list backups for a database
+ * restore a database from an existing backup
+ """
+
+ def ListDatabases(self, request, context):
+ """Lists Cloud Spanner databases."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def CreateDatabase(self, request, context):
+ """Creates a new Cloud Spanner database and starts to prepare it for serving.
+ The returned [long-running operation][google.longrunning.Operation] will
+ have a name of the format `/operations/` and
+ can be used to track preparation of the database. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata].
+ The [response][google.longrunning.Operation.response] field type is
+ [Database][google.spanner.admin.database.v1.Database], if successful.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def GetDatabase(self, request, context):
+ """Gets the state of a Cloud Spanner database."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def UpdateDatabase(self, request, context):
+ """Updates a Cloud Spanner database. The returned
+ [long-running operation][google.longrunning.Operation] can be used to track
+ the progress of updating the database. If the named database does not
+ exist, returns `NOT_FOUND`.
+
+ While the operation is pending:
+
+ * The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field is set to true.
+ * Cancelling the operation is best-effort. If the cancellation succeeds,
+ the operation metadata's
+ [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
+ is set, the updates are reverted, and the operation terminates with a
+ `CANCELLED` status.
+ * New UpdateDatabase requests will return a `FAILED_PRECONDITION` error
+ until the pending operation is done (returns successfully or with
+ error).
+ * Reading the database via the API continues to give the pre-request
+ values.
+
+ Upon completion of the returned operation:
+
+ * The new values are in effect and readable via the API.
+ * The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field becomes false.
+
+ The returned [long-running operation][google.longrunning.Operation] will
+ have a name of the format
+ `projects//instances//databases//operations/`
+ and can be used to track the database modification. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
+ The [response][google.longrunning.Operation.response] field type is
+ [Database][google.spanner.admin.database.v1.Database], if successful.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def UpdateDatabaseDdl(self, request, context):
+ """Updates the schema of a Cloud Spanner database by
+ creating/altering/dropping tables, columns, indexes, etc. The returned
+ [long-running operation][google.longrunning.Operation] will have a name of
+ the format `<database_name>/operations/<operation_id>` and can be used to
+ track execution of the schema change(s). The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
+ The operation has no response.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def DropDatabase(self, request, context):
+ """Drops (aka deletes) a Cloud Spanner database.
+ Completed backups for the database will be retained according to their
+ `expire_time`.
+ Note: Cloud Spanner might continue to accept requests for a few seconds
+ after the database has been deleted.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def GetDatabaseDdl(self, request, context):
+ """Returns the schema of a Cloud Spanner database as a list of formatted
+ DDL statements. This method does not show pending schema updates, those may
+ be queried using the [Operations][google.longrunning.Operations] API.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def SetIamPolicy(self, request, context):
+ """Sets the access control policy on a database or backup resource.
+ Replaces any existing policy.
+
+ Authorization requires `spanner.databases.setIamPolicy`
+ permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
+ For backups, authorization requires `spanner.backups.setIamPolicy`
+ permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def GetIamPolicy(self, request, context):
+ """Gets the access control policy for a database or backup resource.
+ Returns an empty policy if a database or backup exists but does not have a
+ policy set.
+
+ Authorization requires `spanner.databases.getIamPolicy` permission on
+ [resource][google.iam.v1.GetIamPolicyRequest.resource].
+ For backups, authorization requires `spanner.backups.getIamPolicy`
+ permission on [resource][google.iam.v1.GetIamPolicyRequest.resource].
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def TestIamPermissions(self, request, context):
+ """Returns permissions that the caller has on the specified database or backup
+ resource.
+
+ Attempting this RPC on a non-existent Cloud Spanner database will
+ result in a NOT_FOUND error if the user has
+ `spanner.databases.list` permission on the containing Cloud
+ Spanner instance. Otherwise returns an empty set of permissions.
+ Calling this method on a backup that does not exist will
+ result in a NOT_FOUND error if the user has
+ `spanner.backups.list` permission on the containing instance.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def CreateBackup(self, request, context):
+ """Starts creating a new Cloud Spanner Backup.
+ The returned backup [long-running operation][google.longrunning.Operation]
+ will have a name of the format
+ `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
+ and can be used to track creation of the backup. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
+ The [response][google.longrunning.Operation.response] field type is
+ [Backup][google.spanner.admin.database.v1.Backup], if successful.
+ Cancelling the returned operation will stop the creation and delete the
+ backup. There can be only one pending backup creation per database. Backup
+ creation of different databases can run concurrently.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def CopyBackup(self, request, context):
+ """Starts copying a Cloud Spanner Backup.
+ The returned backup [long-running operation][google.longrunning.Operation]
+ will have a name of the format
+ `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
+ and can be used to track copying of the backup. The operation is associated
+ with the destination backup.
+ The [metadata][google.longrunning.Operation.metadata] field type is
+ [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
+ The [response][google.longrunning.Operation.response] field type is
+ [Backup][google.spanner.admin.database.v1.Backup], if successful.
+ Cancelling the returned operation will stop the copying and delete the
+ destination backup. Concurrent CopyBackup requests can run on the same
+ source backup.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def GetBackup(self, request, context):
+ """Gets metadata on a pending or completed
+ [Backup][google.spanner.admin.database.v1.Backup].
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def UpdateBackup(self, request, context):
+ """Updates a pending or completed
+ [Backup][google.spanner.admin.database.v1.Backup].
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def DeleteBackup(self, request, context):
+ """Deletes a pending or completed
+ [Backup][google.spanner.admin.database.v1.Backup].
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def ListBackups(self, request, context):
+ """Lists completed and pending backups.
+ Backups returned are ordered by `create_time` in descending order,
+ starting from the most recent `create_time`.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def RestoreDatabase(self, request, context):
+ """Create a new database by restoring from a completed backup. The new
+ database must be in the same project and in an instance with the same
+ instance configuration as the instance containing
+ the backup. The returned database [long-running
+ operation][google.longrunning.Operation] has a name of the format
+ `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`,
+ and can be used to track the progress of the operation, and to cancel it.
+ The [metadata][google.longrunning.Operation.metadata] field type is
+ [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
+ The [response][google.longrunning.Operation.response] type
+ is [Database][google.spanner.admin.database.v1.Database], if
+ successful. Cancelling the returned operation will stop the restore and
+ delete the database.
+ There can be only one database being restored into an instance at a time.
+ Once the restore operation completes, a new restore operation can be
+ initiated, without waiting for the optimize operation associated with the
+ first restore to complete.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def ListDatabaseOperations(self, request, context):
+ """Lists database [longrunning-operations][google.longrunning.Operation].
+ A database operation has a name of the form
+ `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`.
+ The long-running operation
+ [metadata][google.longrunning.Operation.metadata] field type
+ `metadata.type_url` describes the type of the metadata. Operations returned
+ include those that have completed/failed/canceled within the last 7 days,
+ and pending operations.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def ListBackupOperations(self, request, context):
+ """Lists the backup [long-running operations][google.longrunning.Operation] in
+ the given instance. A backup operation has a name of the form
+ `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>`.
+ The long-running operation
+ [metadata][google.longrunning.Operation.metadata] field type
+ `metadata.type_url` describes the type of the metadata. Operations returned
+ include those that have completed/failed/canceled within the last 7 days,
+ and pending operations. Operations returned are ordered by
+ `operation.metadata.value.progress.start_time` in descending order starting
+ from the most recently started operation.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def ListDatabaseRoles(self, request, context):
+ """Lists Cloud Spanner database roles."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def CreateBackupSchedule(self, request, context):
+ """Creates a new backup schedule."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def GetBackupSchedule(self, request, context):
+ """Gets backup schedule for the input schedule name."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def UpdateBackupSchedule(self, request, context):
+ """Updates a backup schedule."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def DeleteBackupSchedule(self, request, context):
+ """Deletes a backup schedule."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def ListBackupSchedules(self, request, context):
+ """Lists all the backup schedules for the database."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+
+def add_DatabaseAdminServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ "ListDatabases": grpc.unary_unary_rpc_method_handler(
+ servicer.ListDatabases,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabasesRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabasesResponse.serialize,
+ ),
+ "CreateDatabase": grpc.unary_unary_rpc_method_handler(
+ servicer.CreateDatabase,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.CreateDatabaseRequest.deserialize,
+ response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
+ ),
+ "GetDatabase": grpc.unary_unary_rpc_method_handler(
+ servicer.GetDatabase,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.GetDatabaseRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.Database.serialize,
+ ),
+ "UpdateDatabase": grpc.unary_unary_rpc_method_handler(
+ servicer.UpdateDatabase,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.UpdateDatabaseRequest.deserialize,
+ response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
+ ),
+ "UpdateDatabaseDdl": grpc.unary_unary_rpc_method_handler(
+ servicer.UpdateDatabaseDdl,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.UpdateDatabaseDdlRequest.deserialize,
+ response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
+ ),
+ "DropDatabase": grpc.unary_unary_rpc_method_handler(
+ servicer.DropDatabase,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.DropDatabaseRequest.deserialize,
+ response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ ),
+ "GetDatabaseDdl": grpc.unary_unary_rpc_method_handler(
+ servicer.GetDatabaseDdl,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.GetDatabaseDdlRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.GetDatabaseDdlResponse.serialize,
+ ),
+ "SetIamPolicy": grpc.unary_unary_rpc_method_handler(
+ servicer.SetIamPolicy,
+ request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString,
+ response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString,
+ ),
+ "GetIamPolicy": grpc.unary_unary_rpc_method_handler(
+ servicer.GetIamPolicy,
+ request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString,
+ response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString,
+ ),
+ "TestIamPermissions": grpc.unary_unary_rpc_method_handler(
+ servicer.TestIamPermissions,
+ request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString,
+ response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString,
+ ),
+ "CreateBackup": grpc.unary_unary_rpc_method_handler(
+ servicer.CreateBackup,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.CreateBackupRequest.deserialize,
+ response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
+ ),
+ "CopyBackup": grpc.unary_unary_rpc_method_handler(
+ servicer.CopyBackup,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.CopyBackupRequest.deserialize,
+ response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
+ ),
+ "GetBackup": grpc.unary_unary_rpc_method_handler(
+ servicer.GetBackup,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.GetBackupRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.Backup.serialize,
+ ),
+ "UpdateBackup": grpc.unary_unary_rpc_method_handler(
+ servicer.UpdateBackup,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.UpdateBackupRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.Backup.serialize,
+ ),
+ "DeleteBackup": grpc.unary_unary_rpc_method_handler(
+ servicer.DeleteBackup,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.DeleteBackupRequest.deserialize,
+ response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ ),
+ "ListBackups": grpc.unary_unary_rpc_method_handler(
+ servicer.ListBackups,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupsRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupsResponse.serialize,
+ ),
+ "RestoreDatabase": grpc.unary_unary_rpc_method_handler(
+ servicer.RestoreDatabase,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.RestoreDatabaseRequest.deserialize,
+ response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
+ ),
+ "ListDatabaseOperations": grpc.unary_unary_rpc_method_handler(
+ servicer.ListDatabaseOperations,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseOperationsRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseOperationsResponse.serialize,
+ ),
+ "ListBackupOperations": grpc.unary_unary_rpc_method_handler(
+ servicer.ListBackupOperations,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupOperationsRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupOperationsResponse.serialize,
+ ),
+ "ListDatabaseRoles": grpc.unary_unary_rpc_method_handler(
+ servicer.ListDatabaseRoles,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseRolesRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseRolesResponse.serialize,
+ ),
+ "CreateBackupSchedule": grpc.unary_unary_rpc_method_handler(
+ servicer.CreateBackupSchedule,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.CreateBackupScheduleRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.BackupSchedule.serialize,
+ ),
+ "GetBackupSchedule": grpc.unary_unary_rpc_method_handler(
+ servicer.GetBackupSchedule,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.GetBackupScheduleRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.BackupSchedule.serialize,
+ ),
+ "UpdateBackupSchedule": grpc.unary_unary_rpc_method_handler(
+ servicer.UpdateBackupSchedule,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.UpdateBackupScheduleRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.BackupSchedule.serialize,
+ ),
+ "DeleteBackupSchedule": grpc.unary_unary_rpc_method_handler(
+ servicer.DeleteBackupSchedule,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.DeleteBackupScheduleRequest.deserialize,
+ response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ ),
+ "ListBackupSchedules": grpc.unary_unary_rpc_method_handler(
+ servicer.ListBackupSchedules,
+ request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.ListBackupSchedulesRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.ListBackupSchedulesResponse.serialize,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ "google.spanner.admin.database.v1.DatabaseAdmin", rpc_method_handlers
+ )
+ server.add_generic_rpc_handlers((generic_handler,))
+ server.add_registered_method_handlers(
+ "google.spanner.admin.database.v1.DatabaseAdmin", rpc_method_handlers
+ )
+
+
+# This class is part of an EXPERIMENTAL API.
+class DatabaseAdmin(object):
+ """Cloud Spanner Database Admin API
+
+ The Cloud Spanner Database Admin API can be used to:
+ * create, drop, and list databases
+ * update the schema of pre-existing databases
+ * create, delete, copy and list backups for a database
+ * restore a database from an existing backup
+ """
+
+ @staticmethod
+ def ListDatabases(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabasesRequest.SerializeToString,
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabasesResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def CreateDatabase(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.CreateDatabaseRequest.SerializeToString,
+ google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def GetDatabase(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.GetDatabaseRequest.SerializeToString,
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.Database.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def UpdateDatabase(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabase",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.UpdateDatabaseRequest.SerializeToString,
+ google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def UpdateDatabaseDdl(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.UpdateDatabaseDdlRequest.SerializeToString,
+ google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def DropDatabase(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.DropDatabaseRequest.SerializeToString,
+ google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def GetDatabaseDdl(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.GetDatabaseDdlRequest.SerializeToString,
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.GetDatabaseDdlResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def SetIamPolicy(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy",
+ google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString,
+ google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def GetIamPolicy(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy",
+ google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString,
+ google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def TestIamPermissions(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions",
+ google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString,
+ google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def CreateBackup(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.CreateBackupRequest.SerializeToString,
+ google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def CopyBackup(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/CopyBackup",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.CopyBackupRequest.SerializeToString,
+ google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def GetBackup(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackup",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.GetBackupRequest.SerializeToString,
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.Backup.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def UpdateBackup(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.UpdateBackupRequest.SerializeToString,
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.Backup.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def DeleteBackup(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.DeleteBackupRequest.SerializeToString,
+ google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def ListBackups(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackups",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupsRequest.SerializeToString,
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupsResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def RestoreDatabase(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.RestoreDatabaseRequest.SerializeToString,
+ google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def ListDatabaseOperations(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseOperationsRequest.SerializeToString,
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseOperationsResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def ListBackupOperations(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupOperationsRequest.SerializeToString,
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupOperationsResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def ListDatabaseRoles(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseRoles",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseRolesRequest.SerializeToString,
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseRolesResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def CreateBackupSchedule(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackupSchedule",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.CreateBackupScheduleRequest.SerializeToString,
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.BackupSchedule.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def GetBackupSchedule(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackupSchedule",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.GetBackupScheduleRequest.SerializeToString,
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.BackupSchedule.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def UpdateBackupSchedule(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackupSchedule",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.UpdateBackupScheduleRequest.SerializeToString,
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.BackupSchedule.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def DeleteBackupSchedule(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackupSchedule",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.DeleteBackupScheduleRequest.SerializeToString,
+ google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def ListBackupSchedules(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupSchedules",
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.ListBackupSchedulesRequest.SerializeToString,
+ google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.ListBackupSchedulesResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
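The generated `DatabaseAdminServicer` base class and the `add_DatabaseAdminServicer_to_server` helper above are the hooks an in-memory mock server can build on: a test double subclasses the servicer, overrides the RPCs it cares about, and is registered on an in-process gRPC server. The sketch below is illustrative only; `MockDatabaseAdmin`, the import path, and the canned response are assumptions, not part of this diff.

```python
# Minimal sketch (assumed names/paths): register the generated servicer on an
# in-process gRPC server so admin RPCs can be served from memory during tests.
from concurrent import futures

import grpc
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
from google.cloud.spanner_v1.testing.spanner_database_admin_pb2_grpc import (  # assumed module path
    DatabaseAdminServicer,
    add_DatabaseAdminServicer_to_server,
)


class MockDatabaseAdmin(DatabaseAdminServicer):  # hypothetical test double
    def ListDatabases(self, request, context):
        # Return a canned, empty response instead of the default UNIMPLEMENTED.
        return spanner_database_admin.ListDatabasesResponse()


server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
add_DatabaseAdminServicer_to_server(MockDatabaseAdmin(), server)
port = server.add_insecure_port("localhost:0")  # pick any free port
server.start()
# Point a DatabaseAdminClient at f"localhost:{port}" for the test, then:
server.stop(grace=None)
```

The second generated file below exposes the same pattern for the data-plane `Spanner` service (`SpannerServicer` plus `add_SpannerServicer_to_server`).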
diff --git a/google/cloud/spanner_v1/testing/spanner_pb2_grpc.py b/google/cloud/spanner_v1/testing/spanner_pb2_grpc.py
new file mode 100644
index 0000000000..c4622a6a34
--- /dev/null
+++ b/google/cloud/spanner_v1/testing/spanner_pb2_grpc.py
@@ -0,0 +1,882 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+
+# Generated with the following commands:
+#
+# pip install grpcio-tools
+# git clone git@github.com:googleapis/googleapis.git
+# cd googleapis
+# python -m grpc_tools.protoc \
+# -I . \
+# --python_out=. --pyi_out=. --grpc_python_out=. \
+# ./google/spanner/v1/*.proto
+
+"""Client and server classes corresponding to protobuf-defined services."""
+
+import grpc
+from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
+from google.cloud.spanner_v1.types import (
+ commit_response as google_dot_spanner_dot_v1_dot_commit__response__pb2,
+)
+from google.cloud.spanner_v1.types import (
+ result_set as google_dot_spanner_dot_v1_dot_result__set__pb2,
+)
+from google.cloud.spanner_v1.types import (
+ spanner as google_dot_spanner_dot_v1_dot_spanner__pb2,
+)
+from google.cloud.spanner_v1.types import (
+ transaction as google_dot_spanner_dot_v1_dot_transaction__pb2,
+)
+
+GRPC_GENERATED_VERSION = "1.67.0"
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+
+ _version_not_supported = first_version_is_lower(
+ GRPC_VERSION, GRPC_GENERATED_VERSION
+ )
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f"The grpc package installed is at version {GRPC_VERSION},"
+ + " but the generated code in google/spanner/v1/spanner_pb2_grpc.py depends on"
+ + f" grpcio>={GRPC_GENERATED_VERSION}."
+ + f" Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}"
+ + f" or downgrade your generated code using grpcio-tools<={GRPC_VERSION}."
+ )
+
+
+class SpannerServicer(object):
+ """Cloud Spanner API
+
+ The Cloud Spanner API can be used to manage sessions and execute
+ transactions on data stored in Cloud Spanner databases.
+ """
+
+ def CreateSession(self, request, context):
+ """Creates a new session. A session can be used to perform
+ transactions that read and/or modify data in a Cloud Spanner database.
+ Sessions are meant to be reused for many consecutive
+ transactions.
+
+ Sessions can only execute one transaction at a time. To execute
+ multiple concurrent read-write/write-only transactions, create
+ multiple sessions. Note that standalone reads and queries use a
+ transaction internally, and count toward the one transaction
+ limit.
+
+ Active sessions use additional server resources, so it is a good idea to
+ delete idle and unneeded sessions.
+ Aside from explicit deletes, Cloud Spanner may delete sessions for which no
+ operations are sent for more than an hour. If a session is deleted,
+ requests to it return `NOT_FOUND`.
+
+ Idle sessions can be kept alive by sending a trivial SQL query
+ periodically, e.g., `"SELECT 1"`.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def BatchCreateSessions(self, request, context):
+ """Creates multiple new sessions.
+
+ This API can be used to initialize a session cache on the clients.
+ See https://goo.gl/TgSFN2 for best practices on session cache management.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def GetSession(self, request, context):
+ """Gets a session. Returns `NOT_FOUND` if the session does not exist.
+ This is mainly useful for determining whether a session is still
+ alive.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def ListSessions(self, request, context):
+ """Lists all sessions in a given database."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def DeleteSession(self, request, context):
+ """Ends a session, releasing server resources associated with it. This will
+ asynchronously trigger cancellation of any operations that are running with
+ this session.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def ExecuteSql(self, request, context):
+ """Executes an SQL statement, returning all results in a single reply. This
+ method cannot be used to return a result set larger than 10 MiB;
+ if the query yields more data than that, the query fails with
+ a `FAILED_PRECONDITION` error.
+
+ Operations inside read-write transactions might return `ABORTED`. If
+ this occurs, the application should restart the transaction from
+ the beginning. See [Transaction][google.spanner.v1.Transaction] for more
+ details.
+
+ Larger result sets can be fetched in streaming fashion by calling
+ [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]
+ instead.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def ExecuteStreamingSql(self, request, context):
+ """Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the
+ result set as a stream. Unlike
+ [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on
+ the size of the returned result set. However, no individual row in the
+ result set can exceed 100 MiB, and no column value can exceed 10 MiB.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def ExecuteBatchDml(self, request, context):
+ """Executes a batch of SQL DML statements. This method allows many statements
+ to be run with lower latency than submitting them sequentially with
+ [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
+
+ Statements are executed in sequential order. A request can succeed even if
+ a statement fails. The
+ [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status]
+ field in the response provides information about the statement that failed.
+ Clients must inspect this field to determine whether an error occurred.
+
+ Execution stops after the first failed statement; the remaining statements
+ are not executed.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def Read(self, request, context):
+ """Reads rows from the database using key lookups and scans, as a
+ simple key/value style alternative to
+ [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be
+ used to return a result set larger than 10 MiB; if the read matches more
+ data than that, the read fails with a `FAILED_PRECONDITION`
+ error.
+
+ Reads inside read-write transactions might return `ABORTED`. If
+ this occurs, the application should restart the transaction from
+ the beginning. See [Transaction][google.spanner.v1.Transaction] for more
+ details.
+
+ Larger result sets can be yielded in streaming fashion by calling
+ [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def StreamingRead(self, request, context):
+ """Like [Read][google.spanner.v1.Spanner.Read], except returns the result set
+ as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no
+ limit on the size of the returned result set. However, no individual row in
+ the result set can exceed 100 MiB, and no column value can exceed
+ 10 MiB.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def BeginTransaction(self, request, context):
+ """Begins a new transaction. This step can often be skipped:
+ [Read][google.spanner.v1.Spanner.Read],
+ [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
+ [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
+ side-effect.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def Commit(self, request, context):
+ """Commits a transaction. The request includes the mutations to be
+ applied to rows in the database.
+
+ `Commit` might return an `ABORTED` error. This can occur at any time;
+ commonly, the cause is conflicts with concurrent
+ transactions. However, it can also happen for a variety of other
+ reasons. If `Commit` returns `ABORTED`, the caller should re-attempt
+ the transaction from the beginning, re-using the same session.
+
+ On very rare occasions, `Commit` might return `UNKNOWN`. This can happen,
+ for example, if the client job experiences a 1+ hour networking failure.
+ At that point, Cloud Spanner has lost track of the transaction outcome and
+ we recommend that you perform another read from the database to see the
+ state of things as they are now.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def Rollback(self, request, context):
+ """Rolls back a transaction, releasing any locks it holds. It is a good
+ idea to call this for any transaction that includes one or more
+ [Read][google.spanner.v1.Spanner.Read] or
+ [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately
+ decides not to commit.
+
+ `Rollback` returns `OK` if it successfully aborts the transaction, the
+ transaction was already aborted, or the transaction is not
+ found. `Rollback` never returns `ABORTED`.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def PartitionQuery(self, request, context):
+ """Creates a set of partition tokens that can be used to execute a query
+ operation in parallel. Each of the returned partition tokens can be used
+ by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to
+ specify a subset of the query result to read. The same session and
+ read-only transaction must be used by the PartitionQueryRequest used to
+ create the partition tokens and the ExecuteSqlRequests that use the
+ partition tokens.
+
+ Partition tokens become invalid when the session used to create them
+ is deleted, is idle for too long, begins a new transaction, or becomes too
+ old. When any of these happen, it is not possible to resume the query, and
+ the whole operation must be restarted from the beginning.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def PartitionRead(self, request, context):
+ """Creates a set of partition tokens that can be used to execute a read
+ operation in parallel. Each of the returned partition tokens can be used
+ by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a
+ subset of the read result to read. The same session and read-only
+ transaction must be used by the PartitionReadRequest used to create the
+ partition tokens and the ReadRequests that use the partition tokens. There
+ are no ordering guarantees on rows returned among the returned partition
+ tokens, or even within each individual StreamingRead call issued with a
+ partition_token.
+
+ Partition tokens become invalid when the session used to create them
+ is deleted, is idle for too long, begins a new transaction, or becomes too
+ old. When any of these happen, it is not possible to resume the read, and
+ the whole operation must be restarted from the beginning.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+ def BatchWrite(self, request, context):
+ """Batches the supplied mutation groups in a collection of efficient
+ transactions. All mutations in a group are committed atomically. However,
+ mutations across groups can be committed non-atomically in an unspecified
+ order and thus, they must be independent of each other. Partial failure is
+ possible, i.e., some groups may have been committed successfully, while
+ some may have failed. The results of individual batches are streamed into
+ the response as the batches are applied.
+
+ BatchWrite requests are not replay protected, meaning that each mutation
+ group may be applied more than once. Replays of non-idempotent mutations
+ may have undesirable effects. For example, replays of an insert mutation
+ may produce an already exists error or if you use generated or commit
+ timestamp-based keys, it may result in additional rows being added to the
+ mutation's table. We recommend structuring your mutation groups to be
+ idempotent to avoid this issue.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")
+
+
+def add_SpannerServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ "CreateSession": grpc.unary_unary_rpc_method_handler(
+ servicer.CreateSession,
+ request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.CreateSessionRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.Session.serialize,
+ ),
+ "BatchCreateSessions": grpc.unary_unary_rpc_method_handler(
+ servicer.BatchCreateSessions,
+ request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.BatchCreateSessionsRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.BatchCreateSessionsResponse.serialize,
+ ),
+ "GetSession": grpc.unary_unary_rpc_method_handler(
+ servicer.GetSession,
+ request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.GetSessionRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.Session.serialize,
+ ),
+ "ListSessions": grpc.unary_unary_rpc_method_handler(
+ servicer.ListSessions,
+ request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ListSessionsRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ListSessionsResponse.serialize,
+ ),
+ "DeleteSession": grpc.unary_unary_rpc_method_handler(
+ servicer.DeleteSession,
+ request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.DeleteSessionRequest.deserialize,
+ response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ ),
+ "ExecuteSql": grpc.unary_unary_rpc_method_handler(
+ servicer.ExecuteSql,
+ request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteSqlRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_v1_dot_result__set__pb2.ResultSet.serialize,
+ ),
+ "ExecuteStreamingSql": grpc.unary_stream_rpc_method_handler(
+ servicer.ExecuteStreamingSql,
+ request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteSqlRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_v1_dot_result__set__pb2.PartialResultSet.serialize,
+ ),
+ "ExecuteBatchDml": grpc.unary_unary_rpc_method_handler(
+ servicer.ExecuteBatchDml,
+ request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteBatchDmlRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteBatchDmlResponse.serialize,
+ ),
+ "Read": grpc.unary_unary_rpc_method_handler(
+ servicer.Read,
+ request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ReadRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_v1_dot_result__set__pb2.ResultSet.serialize,
+ ),
+ "StreamingRead": grpc.unary_stream_rpc_method_handler(
+ servicer.StreamingRead,
+ request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ReadRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_v1_dot_result__set__pb2.PartialResultSet.serialize,
+ ),
+ "BeginTransaction": grpc.unary_unary_rpc_method_handler(
+ servicer.BeginTransaction,
+ request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.BeginTransactionRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_v1_dot_transaction__pb2.Transaction.serialize,
+ ),
+ "Commit": grpc.unary_unary_rpc_method_handler(
+ servicer.Commit,
+ request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.CommitRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_v1_dot_commit__response__pb2.CommitResponse.serialize,
+ ),
+ "Rollback": grpc.unary_unary_rpc_method_handler(
+ servicer.Rollback,
+ request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.RollbackRequest.deserialize,
+ response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ ),
+ "PartitionQuery": grpc.unary_unary_rpc_method_handler(
+ servicer.PartitionQuery,
+ request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionQueryRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionResponse.serialize,
+ ),
+ "PartitionRead": grpc.unary_unary_rpc_method_handler(
+ servicer.PartitionRead,
+ request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionReadRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionResponse.serialize,
+ ),
+ "BatchWrite": grpc.unary_stream_rpc_method_handler(
+ servicer.BatchWrite,
+ request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.BatchWriteRequest.deserialize,
+ response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.BatchWriteResponse.serialize,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ "google.spanner.v1.Spanner", rpc_method_handlers
+ )
+ server.add_generic_rpc_handlers((generic_handler,))
+ server.add_registered_method_handlers(
+ "google.spanner.v1.Spanner", rpc_method_handlers
+ )
+
+
+# This class is part of an EXPERIMENTAL API.
+class Spanner(object):
+ """Cloud Spanner API
+
+ The Cloud Spanner API can be used to manage sessions and execute
+ transactions on data stored in Cloud Spanner databases.
+ """
+
+ @staticmethod
+ def CreateSession(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.v1.Spanner/CreateSession",
+ google_dot_spanner_dot_v1_dot_spanner__pb2.CreateSessionRequest.to_json,
+ google_dot_spanner_dot_v1_dot_spanner__pb2.Session.from_json,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def BatchCreateSessions(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.v1.Spanner/BatchCreateSessions",
+ google_dot_spanner_dot_v1_dot_spanner__pb2.BatchCreateSessionsRequest.to_json,
+ google_dot_spanner_dot_v1_dot_spanner__pb2.BatchCreateSessionsResponse.from_json,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def GetSession(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.v1.Spanner/GetSession",
+ google_dot_spanner_dot_v1_dot_spanner__pb2.GetSessionRequest.to_json,
+ google_dot_spanner_dot_v1_dot_spanner__pb2.Session.from_json,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def ListSessions(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.v1.Spanner/ListSessions",
+ google_dot_spanner_dot_v1_dot_spanner__pb2.ListSessionsRequest.to_json,
+ google_dot_spanner_dot_v1_dot_spanner__pb2.ListSessionsResponse.from_json,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def DeleteSession(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.v1.Spanner/DeleteSession",
+ google_dot_spanner_dot_v1_dot_spanner__pb2.DeleteSessionRequest.to_json,
+ google_dot_protobuf_dot_empty__pb2.Empty.from_json,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def ExecuteSql(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.v1.Spanner/ExecuteSql",
+ google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteSqlRequest.to_json,
+ google_dot_spanner_dot_v1_dot_result__set__pb2.ResultSet.from_json,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def ExecuteStreamingSql(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_stream(
+ request,
+ target,
+ "/google.spanner.v1.Spanner/ExecuteStreamingSql",
+ google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteSqlRequest.to_json,
+ google_dot_spanner_dot_v1_dot_result__set__pb2.PartialResultSet.from_json,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def ExecuteBatchDml(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.v1.Spanner/ExecuteBatchDml",
+ google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteBatchDmlRequest.to_json,
+ google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteBatchDmlResponse.from_json,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def Read(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.v1.Spanner/Read",
+ google_dot_spanner_dot_v1_dot_spanner__pb2.ReadRequest.to_json,
+ google_dot_spanner_dot_v1_dot_result__set__pb2.ResultSet.from_json,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def StreamingRead(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_stream(
+ request,
+ target,
+ "/google.spanner.v1.Spanner/StreamingRead",
+ google_dot_spanner_dot_v1_dot_spanner__pb2.ReadRequest.to_json,
+ google_dot_spanner_dot_v1_dot_result__set__pb2.PartialResultSet.from_json,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def BeginTransaction(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.v1.Spanner/BeginTransaction",
+ google_dot_spanner_dot_v1_dot_spanner__pb2.BeginTransactionRequest.to_json,
+ google_dot_spanner_dot_v1_dot_transaction__pb2.Transaction.from_json,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def Commit(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.v1.Spanner/Commit",
+ google_dot_spanner_dot_v1_dot_spanner__pb2.CommitRequest.to_json,
+ google_dot_spanner_dot_v1_dot_commit__response__pb2.CommitResponse.from_json,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def Rollback(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.v1.Spanner/Rollback",
+ google_dot_spanner_dot_v1_dot_spanner__pb2.RollbackRequest.to_json,
+ google_dot_protobuf_dot_empty__pb2.Empty.from_json,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def PartitionQuery(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.v1.Spanner/PartitionQuery",
+ google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionQueryRequest.to_json,
+ google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionResponse.from_json,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def PartitionRead(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.spanner.v1.Spanner/PartitionRead",
+ google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionReadRequest.to_json,
+ google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionResponse.from_json,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
+
+ @staticmethod
+ def BatchWrite(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_stream(
+ request,
+ target,
+ "/google.spanner.v1.Spanner/BatchWrite",
+ google_dot_spanner_dot_v1_dot_spanner__pb2.BatchWriteRequest.to_json,
+ google_dot_spanner_dot_v1_dot_spanner__pb2.BatchWriteResponse.from_json,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True,
+ )
diff --git a/google/cloud/spanner_v1/transaction.py b/google/cloud/spanner_v1/transaction.py
index c872cc380d..d99c4fde2f 100644
--- a/google/cloud/spanner_v1/transaction.py
+++ b/google/cloud/spanner_v1/transaction.py
@@ -98,7 +98,13 @@ def _make_txn_selector(self):
return TransactionSelector(id=self._transaction_id)
def _execute_request(
- self, method, request, trace_name=None, session=None, attributes=None
+ self,
+ method,
+ request,
+ trace_name=None,
+ session=None,
+ attributes=None,
+ observability_options=None,
):
"""Helper method to execute request after fetching transaction selector.
@@ -110,7 +116,9 @@ def _execute_request(
"""
transaction = self._make_txn_selector()
request.transaction = transaction
- with trace_call(trace_name, session, attributes):
+ with trace_call(
+ trace_name, session, attributes, observability_options=observability_options
+ ):
method = functools.partial(method, request=request)
response = _retry(
method,
@@ -147,7 +155,12 @@ def begin(self):
read_write=TransactionOptions.ReadWrite(),
exclude_txn_from_change_streams=self.exclude_txn_from_change_streams,
)
- with trace_call("CloudSpanner.BeginTransaction", self._session):
+ observability_options = getattr(database, "observability_options", None)
+ with trace_call(
+ "CloudSpanner.BeginTransaction",
+ self._session,
+ observability_options=observability_options,
+ ):
method = functools.partial(
api.begin_transaction,
session=self._session.name,
@@ -175,7 +188,12 @@ def rollback(self):
database._route_to_leader_enabled
)
)
- with trace_call("CloudSpanner.Rollback", self._session):
+ observability_options = getattr(database, "observability_options", None)
+ with trace_call(
+ "CloudSpanner.Rollback",
+ self._session,
+ observability_options=observability_options,
+ ):
method = functools.partial(
api.rollback,
session=self._session.name,
@@ -248,7 +266,13 @@ def commit(
max_commit_delay=max_commit_delay,
request_options=request_options,
)
- with trace_call("CloudSpanner.Commit", self._session, trace_attributes):
+ observability_options = getattr(database, "observability_options", None)
+ with trace_call(
+ "CloudSpanner.Commit",
+ self._session,
+ trace_attributes,
+ observability_options,
+ ):
method = functools.partial(
api.commit,
request=request,
@@ -284,7 +308,7 @@ def _make_params_pb(params, param_types):
:raises ValueError:
If ``params`` is None but ``param_types`` is not None.
"""
- if params is not None:
+ if params:
return Struct(
fields={key: _make_value_pb(value) for key, value in params.items()}
)
@@ -362,6 +386,9 @@ def execute_update(
# environment-level options
default_query_options = database._instance._client._query_options
query_options = _merge_query_options(default_query_options, query_options)
+ observability_options = getattr(
+ database._instance._client, "observability_options", None
+ )
if request_options is None:
request_options = RequestOptions()
@@ -399,6 +426,7 @@ def execute_update(
"CloudSpanner.ReadWriteTransaction",
self._session,
trace_attributes,
+ observability_options=observability_options,
)
# Setting the transaction id because the transaction begin was inlined for first rpc.
if (
@@ -415,6 +443,7 @@ def execute_update(
"CloudSpanner.ReadWriteTransaction",
self._session,
trace_attributes,
+ observability_options=observability_options,
)
return response.stats.row_count_exact
@@ -481,6 +510,7 @@ def batch_update(
_metadata_with_leader_aware_routing(database._route_to_leader_enabled)
)
api = database.spanner_api
+ observability_options = getattr(database, "observability_options", None)
seqno, self._execute_sql_count = (
self._execute_sql_count,
@@ -521,6 +551,7 @@ def batch_update(
"CloudSpanner.DMLTransaction",
self._session,
trace_attributes,
+ observability_options=observability_options,
)
# Setting the transaction id because the transaction begin was inlined for first rpc.
for result_set in response.result_sets:
@@ -538,6 +569,7 @@ def batch_update(
"CloudSpanner.DMLTransaction",
self._session,
trace_attributes,
+ observability_options=observability_options,
)
row_counts = [
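Reviewer note: the transaction.py changes above thread an observability_options dict from the database/client down into every trace_call(). A minimal sketch of how a caller could supply those options (mirroring the system test added later in this diff; the project id and exporter choice are illustrative only):

from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter

from google.cloud.spanner_v1 import Client

# Custom tracer provider that will receive the CloudSpanner.* spans.
tracer_provider = TracerProvider()
tracer_provider.add_span_processor(SimpleSpanProcessor(InMemorySpanExporter()))

client = Client(
    project="my-project",  # illustrative project id
    observability_options=dict(
        tracer_provider=tracer_provider,
        # Also record the SQL statement on each span (db.statement attribute).
        enable_extended_tracing=True,
    ),
)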
diff --git a/noxfile.py b/noxfile.py
index f5a2761d73..f32c24f1e3 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -34,6 +34,7 @@
DEFAULT_PYTHON_VERSION = "3.8"
+DEFAULT_MOCK_SERVER_TESTS_PYTHON_VERSION = "3.12"
UNIT_TEST_PYTHON_VERSIONS: List[str] = [
"3.7",
"3.8",
@@ -234,6 +235,34 @@ def unit(session, protobuf_implementation):
)
+@nox.session(python=DEFAULT_MOCK_SERVER_TESTS_PYTHON_VERSION)
+def mockserver(session):
+ # Install all test dependencies, then install this package in-place.
+
+ constraints_path = str(
+ CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
+ )
+ # install_unittest_dependencies(session, "-c", constraints_path)
+ standard_deps = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_DEPENDENCIES
+ session.install(*standard_deps, "-c", constraints_path)
+ session.install("-e", ".", "-c", constraints_path)
+
+ # Run py.test against the mockserver tests.
+ session.run(
+ "py.test",
+ "--quiet",
+ f"--junitxml=unit_{session.python}_sponge_log.xml",
+ "--cov=google",
+ "--cov=tests/unit",
+ "--cov-append",
+ "--cov-config=.coveragerc",
+ "--cov-report=",
+ "--cov-fail-under=0",
+ os.path.join("tests", "mockserver_tests"),
+ *session.posargs,
+ )
+
+
def install_systemtest_dependencies(session, *constraints):
# Use pre-release gRPC for system tests.
# Exclude version 1.52.0rc1 which has a known issue.
diff --git a/owlbot.py b/owlbot.py
index c215f26946..e7fb391c2a 100644
--- a/owlbot.py
+++ b/owlbot.py
@@ -307,4 +307,49 @@ def prerelease_deps\(session, protobuf_implementation\):""",
def prerelease_deps(session, protobuf_implementation, database_dialect):""",
)
+
+mockserver_test = """
+@nox.session(python=DEFAULT_MOCK_SERVER_TESTS_PYTHON_VERSION)
+def mockserver(session):
+ # Install all test dependencies, then install this package in-place.
+
+ constraints_path = str(
+ CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
+ )
+ # install_unittest_dependencies(session, "-c", constraints_path)
+ standard_deps = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_DEPENDENCIES
+ session.install(*standard_deps, "-c", constraints_path)
+ session.install("-e", ".", "-c", constraints_path)
+
+ # Run py.test against the mockserver tests.
+ session.run(
+ "py.test",
+ "--quiet",
+ f"--junitxml=unit_{session.python}_sponge_log.xml",
+ "--cov=google",
+ "--cov=tests/unit",
+ "--cov-append",
+ "--cov-config=.coveragerc",
+ "--cov-report=",
+ "--cov-fail-under=0",
+ os.path.join("tests", "mockserver_tests"),
+ *session.posargs,
+ )
+
+"""
+
+place_before(
+ "noxfile.py",
+ "def install_systemtest_dependencies(session, *constraints):",
+ mockserver_test,
+ escape="()_*:",
+)
+
+place_before(
+ "noxfile.py",
+ "UNIT_TEST_PYTHON_VERSIONS: List[str] = [",
+ 'DEFAULT_MOCK_SERVER_TESTS_PYTHON_VERSION = "3.12"',
+ escape="[]",
+)
+
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
diff --git a/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json b/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json
index 9324f2056b..7c35814b17 100644
--- a/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json
+++ b/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json
@@ -8,7 +8,7 @@
],
"language": "PYTHON",
"name": "google-cloud-spanner-admin-database",
- "version": "3.50.1"
+ "version": "3.51.0"
},
"snippets": [
{
diff --git a/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json b/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json
index 7f64769236..261a7d44f3 100644
--- a/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json
+++ b/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json
@@ -8,7 +8,7 @@
],
"language": "PYTHON",
"name": "google-cloud-spanner-admin-instance",
- "version": "3.50.1"
+ "version": "3.51.0"
},
"snippets": [
{
diff --git a/samples/generated_samples/snippet_metadata_google.spanner.v1.json b/samples/generated_samples/snippet_metadata_google.spanner.v1.json
index 431109d19e..ddb4419273 100644
--- a/samples/generated_samples/snippet_metadata_google.spanner.v1.json
+++ b/samples/generated_samples/snippet_metadata_google.spanner.v1.json
@@ -8,7 +8,7 @@
],
"language": "PYTHON",
"name": "google-cloud-spanner",
- "version": "3.50.1"
+ "version": "3.51.0"
},
"snippets": [
{
diff --git a/samples/samples/requirements.txt b/samples/samples/requirements.txt
index 5a108d39ef..4009a0a00b 100644
--- a/samples/samples/requirements.txt
+++ b/samples/samples/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-spanner==3.49.1
+google-cloud-spanner==3.50.0
futures==3.4.0; python_version < "3"
diff --git a/samples/samples/snippets.py b/samples/samples/snippets.py
index c958a66822..6650ebe88d 100644
--- a/samples/samples/snippets.py
+++ b/samples/samples/snippets.py
@@ -3222,6 +3222,57 @@ def create_instance_with_autoscaling_config(instance_id):
# [END spanner_create_instance_with_autoscaling_config]
+# [START spanner_create_instance_without_default_backup_schedule]
+def create_instance_without_default_backup_schedules(instance_id):
+ spanner_client = spanner.Client()
+ config_name = "{}/instanceConfigs/regional-me-central2".format(
+ spanner_client.project_name
+ )
+
+ operation = spanner_client.instance_admin_api.create_instance(
+ parent=spanner_client.project_name,
+ instance_id=instance_id,
+ instance=spanner_instance_admin.Instance(
+ config=config_name,
+ display_name="This is a display name.",
+ node_count=1,
+ default_backup_schedule_type=spanner_instance_admin.Instance.DefaultBackupScheduleType.NONE, # Optional
+ ),
+ )
+
+ print("Waiting for operation to complete...")
+ operation.result(OPERATION_TIMEOUT_SECONDS)
+
+ print("Created instance {} without default backup schedules".format(instance_id))
+
+
+# [END spanner_create_instance_without_default_backup_schedule]
+
+
+# [START spanner_update_instance_default_backup_schedule_type]
+def update_instance_default_backup_schedule_type(instance_id):
+ spanner_client = spanner.Client()
+
+ name = "{}/instances/{}".format(spanner_client.project_name, instance_id)
+
+ operation = spanner_client.instance_admin_api.update_instance(
+ instance=spanner_instance_admin.Instance(
+ name=name,
+ default_backup_schedule_type=spanner_instance_admin.Instance.DefaultBackupScheduleType.AUTOMATIC, # Optional
+ ),
+ field_mask=field_mask_pb2.FieldMask(
+ paths=["default_backup_schedule_type"]
+ ),
+ )
+
+ print("Waiting for operation to complete...")
+ operation.result(OPERATION_TIMEOUT_SECONDS)
+
+ print("Updated instance {} to have default backup schedules".format(instance_id))
+
+# [END spanner_update_instance_default_backup_schedule_type]
+
+
def add_proto_type_columns(instance_id, database_id):
# [START spanner_add_proto_type_columns]
# instance_id = "your-spanner-instance"
diff --git a/samples/samples/snippets_test.py b/samples/samples/snippets_test.py
index ba3c0bbfe7..87fa7a43a2 100644
--- a/samples/samples/snippets_test.py
+++ b/samples/samples/snippets_test.py
@@ -197,6 +197,25 @@ def test_create_instance_with_autoscaling_config(capsys, lci_instance_id):
retry_429(instance.delete)()
+def test_create_and_update_instance_default_backup_schedule_type(capsys, lci_instance_id):
+ retry_429(snippets.create_instance_without_default_backup_schedules)(
+ lci_instance_id,
+ )
+ create_out, _ = capsys.readouterr()
+ assert lci_instance_id in create_out
+ assert "without default backup schedules" in create_out
+
+ retry_429(snippets.update_instance_default_backup_schedule_type)(
+ lci_instance_id,
+ )
+ update_out, _ = capsys.readouterr()
+ assert lci_instance_id in update_out
+ assert "to have default backup schedules" in update_out
+ spanner_client = spanner.Client()
+ instance = spanner_client.instance(lci_instance_id)
+ retry_429(instance.delete)()
+
+
def test_create_instance_partition(capsys, instance_partition_instance_id):
 # Unable to use create_instance since it has editions set where partitions are unsupported.
 # The minimal requirement for editions is ENTERPRISE_PLUS for partitions to be supported.
diff --git a/tests/mockserver_tests/__init__.py b/tests/mockserver_tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/mockserver_tests/mock_server_test_base.py b/tests/mockserver_tests/mock_server_test_base.py
new file mode 100644
index 0000000000..1cd7656297
--- /dev/null
+++ b/tests/mockserver_tests/mock_server_test_base.py
@@ -0,0 +1,139 @@
+# Copyright 2024 Google LLC All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+from google.cloud.spanner_dbapi.parsed_statement import AutocommitDmlMode
+from google.cloud.spanner_v1.testing.mock_database_admin import DatabaseAdminServicer
+from google.cloud.spanner_v1.testing.mock_spanner import (
+ start_mock_server,
+ SpannerServicer,
+)
+import google.cloud.spanner_v1.types.type as spanner_type
+import google.cloud.spanner_v1.types.result_set as result_set
+from google.api_core.client_options import ClientOptions
+from google.auth.credentials import AnonymousCredentials
+from google.cloud.spanner_v1 import Client, TypeCode, FixedSizePool
+from google.cloud.spanner_v1.database import Database
+from google.cloud.spanner_v1.instance import Instance
+import grpc
+
+
+def add_result(sql: str, result: result_set.ResultSet):
+ MockServerTestBase.spanner_service.mock_spanner.add_result(sql, result)
+
+
+def add_update_count(
+ sql: str, count: int, dml_mode: AutocommitDmlMode = AutocommitDmlMode.TRANSACTIONAL
+):
+ if dml_mode == AutocommitDmlMode.PARTITIONED_NON_ATOMIC:
+ stats = dict(row_count_lower_bound=count)
+ else:
+ stats = dict(row_count_exact=count)
+ result = result_set.ResultSet(dict(stats=result_set.ResultSetStats(stats)))
+ add_result(sql, result)
+
+
+def add_select1_result():
+ add_single_result("select 1", "c", TypeCode.INT64, [("1",)])
+
+
+def add_single_result(
+ sql: str, column_name: str, type_code: spanner_type.TypeCode, row
+):
+ result = result_set.ResultSet(
+ dict(
+ metadata=result_set.ResultSetMetadata(
+ dict(
+ row_type=spanner_type.StructType(
+ dict(
+ fields=[
+ spanner_type.StructType.Field(
+ dict(
+ name=column_name,
+ type=spanner_type.Type(dict(code=type_code)),
+ )
+ )
+ ]
+ )
+ )
+ )
+ ),
+ )
+ )
+ result.rows.extend(row)
+ MockServerTestBase.spanner_service.mock_spanner.add_result(sql, result)
+
+
+class MockServerTestBase(unittest.TestCase):
+ server: grpc.Server = None
+ spanner_service: SpannerServicer = None
+ database_admin_service: DatabaseAdminServicer = None
+ port: int = None
+
+ def __init__(self, *args, **kwargs):
+ super(MockServerTestBase, self).__init__(*args, **kwargs)
+ self._client = None
+ self._instance = None
+ self._database = None
+
+ @classmethod
+ def setup_class(cls):
+ (
+ MockServerTestBase.server,
+ MockServerTestBase.spanner_service,
+ MockServerTestBase.database_admin_service,
+ MockServerTestBase.port,
+ ) = start_mock_server()
+
+ @classmethod
+ def teardown_class(cls):
+ if MockServerTestBase.server is not None:
+ MockServerTestBase.server.stop(grace=None)
+ MockServerTestBase.server = None
+
+ def setup_method(self, *args, **kwargs):
+ self._client = None
+ self._instance = None
+ self._database = None
+
+ def teardown_method(self, *args, **kwargs):
+ MockServerTestBase.spanner_service.clear_requests()
+ MockServerTestBase.database_admin_service.clear_requests()
+
+ @property
+ def client(self) -> Client:
+ if self._client is None:
+ self._client = Client(
+ project="p",
+ credentials=AnonymousCredentials(),
+ client_options=ClientOptions(
+ api_endpoint="localhost:" + str(MockServerTestBase.port),
+ ),
+ )
+ return self._client
+
+ @property
+ def instance(self) -> Instance:
+ if self._instance is None:
+ self._instance = self.client.instance("test-instance")
+ return self._instance
+
+ @property
+ def database(self) -> Database:
+ if self._database is None:
+ self._database = self.instance.database(
+ "test-database", pool=FixedSizePool(size=10)
+ )
+ return self._database
diff --git a/tests/mockserver_tests/test_basics.py b/tests/mockserver_tests/test_basics.py
new file mode 100644
index 0000000000..ed0906cb9b
--- /dev/null
+++ b/tests/mockserver_tests/test_basics.py
@@ -0,0 +1,87 @@
+# Copyright 2024 Google LLC All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
+from google.cloud.spanner_dbapi import Connection
+from google.cloud.spanner_dbapi.parsed_statement import AutocommitDmlMode
+from google.cloud.spanner_v1 import (
+ BatchCreateSessionsRequest,
+ ExecuteSqlRequest,
+ BeginTransactionRequest,
+ TransactionOptions,
+)
+
+from tests.mockserver_tests.mock_server_test_base import (
+ MockServerTestBase,
+ add_select1_result,
+ add_update_count,
+)
+
+
+class TestBasics(MockServerTestBase):
+ def test_select1(self):
+ add_select1_result()
+ with self.database.snapshot() as snapshot:
+ results = snapshot.execute_sql("select 1")
+ result_list = []
+ for row in results:
+ result_list.append(row)
+ self.assertEqual(1, row[0])
+ self.assertEqual(1, len(result_list))
+ requests = self.spanner_service.requests
+ self.assertEqual(2, len(requests), msg=requests)
+ self.assertTrue(isinstance(requests[0], BatchCreateSessionsRequest))
+ self.assertTrue(isinstance(requests[1], ExecuteSqlRequest))
+
+ def test_create_table(self):
+ database_admin_api = self.client.database_admin_api
+ request = spanner_database_admin.UpdateDatabaseDdlRequest(
+ dict(
+ database=database_admin_api.database_path(
+ "test-project", "test-instance", "test-database"
+ ),
+ statements=[
+ "CREATE TABLE Test ("
+ "Id INT64, "
+ "Value STRING(MAX)) "
+ "PRIMARY KEY (Id)",
+ ],
+ )
+ )
+ operation = database_admin_api.update_database_ddl(request)
+ operation.result(1)
+
+ # TODO: Move this to a separate class once the mock server test setup has
+ # been refactored to use a base class for the boilerplate code.
+ def test_dbapi_partitioned_dml(self):
+ sql = "UPDATE singers SET foo='bar' WHERE active = true"
+ add_update_count(sql, 100, AutocommitDmlMode.PARTITIONED_NON_ATOMIC)
+ connection = Connection(self.instance, self.database)
+ connection.autocommit = True
+ connection.set_autocommit_dml_mode(AutocommitDmlMode.PARTITIONED_NON_ATOMIC)
+ with connection.cursor() as cursor:
+ # Note: SQLAlchemy uses [] as the list of parameters for statements
+ # with no parameters.
+ cursor.execute(sql, [])
+ self.assertEqual(100, cursor.rowcount)
+
+ requests = self.spanner_service.requests
+ self.assertEqual(3, len(requests), msg=requests)
+ self.assertTrue(isinstance(requests[0], BatchCreateSessionsRequest))
+ self.assertTrue(isinstance(requests[1], BeginTransactionRequest))
+ self.assertTrue(isinstance(requests[2], ExecuteSqlRequest))
+ begin_request: BeginTransactionRequest = requests[1]
+ self.assertEqual(
+ TransactionOptions(dict(partitioned_dml={})), begin_request.options
+ )
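Reviewer note: a hedged sketch of how a further mock-server test could seed its own result through the add_single_result helper from mock_server_test_base.py above (the SQL text, column name, and value are illustrative only):

from google.cloud.spanner_v1 import TypeCode

from tests.mockserver_tests.mock_server_test_base import (
    MockServerTestBase,
    add_single_result,
)


class TestSelectSingerName(MockServerTestBase):
    def test_select_name(self):
        # Register a one-row, one-column STRING result for this exact SQL string.
        add_single_result(
            "select name from singers", "name", TypeCode.STRING, [("Alice",)]
        )
        with self.database.snapshot() as snapshot:
            rows = list(snapshot.execute_sql("select name from singers"))
        self.assertEqual(1, len(rows))
        self.assertEqual("Alice", rows[0][0])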
diff --git a/tests/system/test_dbapi.py b/tests/system/test_dbapi.py
index feb580d903..a98f100bcc 100644
--- a/tests/system/test_dbapi.py
+++ b/tests/system/test_dbapi.py
@@ -11,11 +11,13 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
+import base64
import datetime
from collections import defaultdict
+
import pytest
import time
+import decimal
from google.cloud import spanner_v1
from google.cloud._helpers import UTC
@@ -50,7 +52,22 @@
SQL SECURITY INVOKER
AS
SELECT c.email
- FROM contacts AS c;"""
+ FROM contacts AS c;
+
+ CREATE TABLE all_types (
+ id int64,
+ col_bool bool,
+ col_bytes bytes(max),
+ col_date date,
+ col_float32 float32,
+ col_float64 float64,
+ col_int64 int64,
+ col_json json,
+ col_numeric numeric,
+ col_string string(max),
+ col_timestamp timestamp,
+ ) primary key (col_int64);
+ """
DDL_STATEMENTS = [stmt.strip() for stmt in DDL.split(";") if stmt.strip()]
@@ -1602,3 +1619,29 @@ def test_list_tables(self, include_views):
def test_invalid_statement_error(self):
with pytest.raises(ProgrammingError):
self._cursor.execute("-- comment only")
+
+ def test_insert_all_types(self):
+ """Test inserting all supported data types"""
+
+ self._conn.autocommit = True
+ self._cursor.execute(
+ """
+ INSERT INTO all_types (id, col_bool, col_bytes, col_date, col_float32, col_float64,
+ col_int64, col_json, col_numeric, col_string, col_timestamp)
+ VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
+ """,
+ (
+ 1,
+ True,
+ base64.b64encode(b"test-bytes"),
+ datetime.date(2024, 12, 3),
+ 3.14,
+ 3.14,
+ 123,
+ JsonObject({"key": "value"}),
+ decimal.Decimal("3.14"),
+ "test-string",
+ datetime.datetime(2024, 12, 3, 17, 30, 14),
+ ),
+ )
+ assert self._cursor.rowcount == 1
diff --git a/tests/system/test_observability_options.py b/tests/system/test_observability_options.py
new file mode 100644
index 0000000000..8382255c15
--- /dev/null
+++ b/tests/system/test_observability_options.py
@@ -0,0 +1,134 @@
+# Copyright 2024 Google LLC All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from . import _helpers
+from google.cloud.spanner_v1 import Client
+
+HAS_OTEL_INSTALLED = False
+
+try:
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
+ InMemorySpanExporter,
+ )
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.sampling import ALWAYS_ON
+ from opentelemetry import trace
+
+ HAS_OTEL_INSTALLED = True
+except ImportError:
+ pass
+
+
+@pytest.mark.skipif(
+ not HAS_OTEL_INSTALLED, reason="OpenTelemetry is necessary to test traces."
+)
+@pytest.mark.skipif(
+ not _helpers.USE_EMULATOR, reason="Emulator is necessary to test traces."
+)
+def test_observability_options_propagation():
+ PROJECT = _helpers.EMULATOR_PROJECT
+ CONFIGURATION_NAME = "config-name"
+ INSTANCE_ID = _helpers.INSTANCE_ID
+ DISPLAY_NAME = "display-name"
+ DATABASE_ID = _helpers.unique_id("temp_db")
+ NODE_COUNT = 5
+ LABELS = {"test": "true"}
+
+ def test_propagation(enable_extended_tracing):
+ global_tracer_provider = TracerProvider(sampler=ALWAYS_ON)
+ trace.set_tracer_provider(global_tracer_provider)
+ global_trace_exporter = InMemorySpanExporter()
+ global_tracer_provider.add_span_processor(
+ SimpleSpanProcessor(global_trace_exporter)
+ )
+
+ inject_tracer_provider = TracerProvider(sampler=ALWAYS_ON)
+ inject_trace_exporter = InMemorySpanExporter()
+ inject_tracer_provider.add_span_processor(
+ SimpleSpanProcessor(inject_trace_exporter)
+ )
+ observability_options = dict(
+ tracer_provider=inject_tracer_provider,
+ enable_extended_tracing=enable_extended_tracing,
+ )
+ client = Client(
+ project=PROJECT,
+ observability_options=observability_options,
+ credentials=_make_credentials(),
+ )
+
+ instance = client.instance(
+ INSTANCE_ID,
+ CONFIGURATION_NAME,
+ display_name=DISPLAY_NAME,
+ node_count=NODE_COUNT,
+ labels=LABELS,
+ )
+
+ try:
+ instance.create()
+ except Exception:
+ pass
+
+ db = instance.database(DATABASE_ID)
+ try:
+ db.create()
+ except Exception:
+ pass
+
+ assert db.observability_options == observability_options
+ with db.snapshot() as snapshot:
+ res = snapshot.execute_sql("SELECT 1")
+ for val in res:
+ _ = val
+
+ from_global_spans = global_trace_exporter.get_finished_spans()
+ from_inject_spans = inject_trace_exporter.get_finished_spans()
+ assert (
+ len(from_global_spans) == 0
+ ) # "Expecting no spans from the global trace exporter"
+ assert (
+ len(from_inject_spans) >= 2
+ ) # "Expecting at least 2 spans from the injected trace exporter"
+ gotNames = [span.name for span in from_inject_spans]
+ wantNames = ["CloudSpanner.CreateSession", "CloudSpanner.ReadWriteTransaction"]
+ assert gotNames == wantNames
+
+ # Check for conformance of enable_extended_tracing
+ lastSpan = from_inject_spans[len(from_inject_spans) - 1]
+ wantAnnotatedSQL = "SELECT 1"
+ if not enable_extended_tracing:
+ wantAnnotatedSQL = None
+ assert (
+ lastSpan.attributes.get("db.statement", None) == wantAnnotatedSQL
+ ) # "Mismatch in annotated sql"
+
+ try:
+ db.delete()
+ instance.delete()
+ except Exception:
+ pass
+
+ # Test the respective options for enable_extended_tracing
+ test_propagation(True)
+ test_propagation(False)
+
+
+def _make_credentials():
+ from google.auth.credentials import AnonymousCredentials
+
+ return AnonymousCredentials()
diff --git a/tests/system/test_session_api.py b/tests/system/test_session_api.py
index 5322527d12..b7337cb258 100644
--- a/tests/system/test_session_api.py
+++ b/tests/system/test_session_api.py
@@ -2018,17 +2018,20 @@ def test_execute_sql_w_manual_consume(sessions_database):
row_count = 3000
committed = _set_up_table(sessions_database, row_count)
- with sessions_database.snapshot(read_timestamp=committed) as snapshot:
- streamed = snapshot.execute_sql(sd.SQL)
+ for lazy_decode in [False, True]:
+ with sessions_database.snapshot(read_timestamp=committed) as snapshot:
+ streamed = snapshot.execute_sql(sd.SQL, lazy_decode=lazy_decode)
- keyset = spanner_v1.KeySet(all_=True)
+ keyset = spanner_v1.KeySet(all_=True)
- with sessions_database.snapshot(read_timestamp=committed) as snapshot:
- rows = list(snapshot.read(sd.TABLE, sd.COLUMNS, keyset))
+ with sessions_database.snapshot(read_timestamp=committed) as snapshot:
+ rows = list(
+ snapshot.read(sd.TABLE, sd.COLUMNS, keyset, lazy_decode=lazy_decode)
+ )
- assert list(streamed) == rows
- assert streamed._current_row == []
- assert streamed._pending_chunk is None
+ assert list(streamed) == rows
+ assert streamed._current_row == []
+ assert streamed._pending_chunk is None
def test_execute_sql_w_to_dict_list(sessions_database):
@@ -2057,16 +2060,23 @@ def _check_sql_results(
if order and "ORDER" not in sql:
sql += " ORDER BY pkey"
- with database.snapshot() as snapshot:
- rows = list(
- snapshot.execute_sql(
- sql, params=params, param_types=param_types, column_info=column_info
+ for lazy_decode in [False, True]:
+ with database.snapshot() as snapshot:
+ iterator = snapshot.execute_sql(
+ sql,
+ params=params,
+ param_types=param_types,
+ column_info=column_info,
+ lazy_decode=lazy_decode,
)
- )
+ rows = list(iterator)
+ if lazy_decode:
+ for index, row in enumerate(rows):
+ rows[index] = iterator.decode_row(row)
- _sample_data._check_rows_data(
- rows, expected=expected, recurse_into_lists=recurse_into_lists
- )
+ _sample_data._check_rows_data(
+ rows, expected=expected, recurse_into_lists=recurse_into_lists
+ )
def test_multiuse_snapshot_execute_sql_isolation_strong(sessions_database):
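Reviewer note: the test_session_api.py changes above exercise the new lazy_decode flag. A minimal sketch of the same flow outside the test harness (assuming an existing Database object; the SQL is illustrative):

def fetch_rows_lazily(database, sql):
    with database.snapshot() as snapshot:
        results = snapshot.execute_sql(sql, lazy_decode=True)
        # With lazy_decode=True the iterator yields undecoded rows; decode them
        # explicitly once iteration has finished, as the system test above does.
        raw_rows = list(results)
        return [results.decode_row(row) for row in raw_rows]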
diff --git a/tests/unit/spanner_dbapi/test_connection.py b/tests/unit/spanner_dbapi/test_connection.py
index d0fa521f8f..4bee9e93c7 100644
--- a/tests/unit/spanner_dbapi/test_connection.py
+++ b/tests/unit/spanner_dbapi/test_connection.py
@@ -138,6 +138,10 @@ def test_read_only_connection(self):
):
connection.read_only = False
+ # Verify that we can set the value to the same value as it already has.
+ connection.read_only = True
+ self.assertTrue(connection.read_only)
+
connection._spanner_transaction_started = False
connection.read_only = False
self.assertFalse(connection.read_only)
@@ -300,6 +304,19 @@ def test_commit_in_autocommit_mode(self, mock_warn):
CLIENT_TRANSACTION_NOT_STARTED_WARNING, UserWarning, stacklevel=2
)
+ @mock.patch.object(warnings, "warn")
+ def test_commit_in_autocommit_mode_with_ignore_warnings(self, mock_warn):
+ conn = self._make_connection(
+ DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED,
+ ignore_transaction_warnings=True,
+ )
+ assert conn._ignore_transaction_warnings
+ conn._autocommit = True
+
+ conn.commit()
+
+ assert not mock_warn.warn.called
+
def test_commit_database_error(self):
from google.cloud.spanner_dbapi import Connection
@@ -652,6 +669,20 @@ def test_staleness_inside_transaction(self):
with self.assertRaises(ValueError):
connection.staleness = {"read_timestamp": datetime.datetime(2021, 9, 21)}
+ def test_staleness_inside_transaction_same_value(self):
+ """
+ Verify that setting `staleness` to the same value in a transaction is allowed.
+ """
+ connection = self._make_connection()
+ connection.staleness = {"read_timestamp": datetime.datetime(2021, 9, 21)}
+ connection._spanner_transaction_started = True
+ connection._transaction = mock.Mock()
+
+ connection.staleness = {"read_timestamp": datetime.datetime(2021, 9, 21)}
+ self.assertEqual(
+ connection.staleness, {"read_timestamp": datetime.datetime(2021, 9, 21)}
+ )
+
def test_staleness_multi_use(self):
"""
Check that `staleness` option is correctly
diff --git a/tests/unit/spanner_dbapi/test_parse_utils.py b/tests/unit/spanner_dbapi/test_parse_utils.py
index 3a325014fa..4b1c7cdb06 100644
--- a/tests/unit/spanner_dbapi/test_parse_utils.py
+++ b/tests/unit/spanner_dbapi/test_parse_utils.py
@@ -218,6 +218,8 @@ def test_get_param_types(self):
params = {
"a1": 10,
"b1": "string",
+ # Note: We only want a value and not a type for this.
+ # Instead, we let Spanner infer the correct type (FLOAT64 or FLOAT32)
"c1": 10.39,
"d1": TimestampStr("2005-08-30T01:01:01.000001Z"),
"e1": DateStr("2019-12-05"),
@@ -232,7 +234,6 @@ def test_get_param_types(self):
want_types = {
"a1": param_types.INT64,
"b1": param_types.STRING,
- "c1": param_types.FLOAT64,
"d1": param_types.TIMESTAMP,
"e1": param_types.DATE,
"f1": param_types.BOOL,
diff --git a/tests/unit/test_pool.py b/tests/unit/test_pool.py
index 23ed3e7251..2e3b46fa73 100644
--- a/tests/unit/test_pool.py
+++ b/tests/unit/test_pool.py
@@ -15,6 +15,7 @@
from functools import total_ordering
import unittest
+from datetime import datetime, timedelta
import mock
@@ -184,13 +185,30 @@ def test_bind(self):
for session in SESSIONS:
session.create.assert_not_called()
- def test_get_non_expired(self):
+ def test_get_active(self):
pool = self._make_one(size=4)
database = _Database("name")
SESSIONS = sorted([_Session(database) for i in range(0, 4)])
database._sessions.extend(SESSIONS)
pool.bind(database)
+ # check if sessions returned in LIFO order
+ for i in (3, 2, 1, 0):
+ session = pool.get()
+ self.assertIs(session, SESSIONS[i])
+ self.assertFalse(session._exists_checked)
+ self.assertFalse(pool._sessions.full())
+
+ def test_get_non_expired(self):
+ pool = self._make_one(size=4)
+ database = _Database("name")
+ last_use_time = datetime.utcnow() - timedelta(minutes=56)
+ SESSIONS = sorted(
+ [_Session(database, last_use_time=last_use_time) for i in range(0, 4)]
+ )
+ database._sessions.extend(SESSIONS)
+ pool.bind(database)
+
# check if sessions returned in LIFO order
for i in (3, 2, 1, 0):
session = pool.get()
@@ -201,7 +219,8 @@ def test_get_non_expired(self):
def test_get_expired(self):
pool = self._make_one(size=4)
database = _Database("name")
- SESSIONS = [_Session(database)] * 5
+ last_use_time = datetime.utcnow() - timedelta(minutes=65)
+ SESSIONS = [_Session(database, last_use_time=last_use_time)] * 5
SESSIONS[0]._exists = False
database._sessions.extend(SESSIONS)
pool.bind(database)
@@ -915,7 +934,9 @@ def _make_transaction(*args, **kw):
class _Session(object):
_transaction = None
- def __init__(self, database, exists=True, transaction=None):
+ def __init__(
+ self, database, exists=True, transaction=None, last_use_time=datetime.utcnow()
+ ):
self._database = database
self._exists = exists
self._exists_checked = False
@@ -923,10 +944,15 @@ def __init__(self, database, exists=True, transaction=None):
self.create = mock.Mock()
self._deleted = False
self._transaction = transaction
+ self._last_use_time = last_use_time
def __lt__(self, other):
return id(self) < id(other)
+ @property
+ def last_use_time(self):
+ return self._last_use_time
+
def exists(self):
self._exists_checked = True
return self._exists