diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index 5fc5daa31..02a4dedce 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,4 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:8555f0e37e6261408f792bfd6635102d2da5ad73f8f09bcb24f25e6afb5fac97 + digest: sha256:240b5bcc2bafd450912d2da2be15e62bc6de2cf839823ae4bf94d4f392b451dc +# created: 2023-06-03T21:25:37.968717478Z diff --git a/.kokoro/requirements.in b/.kokoro/requirements.in index 882178ce6..ec867d9fd 100644 --- a/.kokoro/requirements.in +++ b/.kokoro/requirements.in @@ -5,6 +5,6 @@ typing-extensions twine wheel setuptools -nox +nox>=2022.11.21 # required to remove dependency on py charset-normalizer<3 click<8.1.0 diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt index fa99c1290..c7929db6d 100644 --- a/.kokoro/requirements.txt +++ b/.kokoro/requirements.txt @@ -1,6 +1,6 @@ # -# This file is autogenerated by pip-compile with python 3.10 -# To update, run: +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: # # pip-compile --allow-unsafe --generate-hashes requirements.in # @@ -113,28 +113,26 @@ commonmark==0.9.1 \ --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \ --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9 # via rich -cryptography==39.0.1 \ - --hash=sha256:0f8da300b5c8af9f98111ffd512910bc792b4c77392a9523624680f7956a99d4 \ - --hash=sha256:35f7c7d015d474f4011e859e93e789c87d21f6f4880ebdc29896a60403328f1f \ - --hash=sha256:5aa67414fcdfa22cf052e640cb5ddc461924a045cacf325cd164e65312d99502 \ - --hash=sha256:5d2d8b87a490bfcd407ed9d49093793d0f75198a35e6eb1a923ce1ee86c62b41 \ - --hash=sha256:6687ef6d0a6497e2b58e7c5b852b53f62142cfa7cd1555795758934da363a965 \ - --hash=sha256:6f8ba7f0328b79f08bdacc3e4e66fb4d7aab0c3584e0bd41328dce5262e26b2e \ - --hash=sha256:706843b48f9a3f9b9911979761c91541e3d90db1ca905fd63fee540a217698bc \ - --hash=sha256:807ce09d4434881ca3a7594733669bd834f5b2c6d5c7e36f8c00f691887042ad \ - --hash=sha256:83e17b26de248c33f3acffb922748151d71827d6021d98c70e6c1a25ddd78505 \ - --hash=sha256:96f1157a7c08b5b189b16b47bc9db2332269d6680a196341bf30046330d15388 \ - --hash=sha256:aec5a6c9864be7df2240c382740fcf3b96928c46604eaa7f3091f58b878c0bb6 \ - --hash=sha256:b0afd054cd42f3d213bf82c629efb1ee5f22eba35bf0eec88ea9ea7304f511a2 \ - --hash=sha256:ced4e447ae29ca194449a3f1ce132ded8fcab06971ef5f618605aacaa612beac \ - --hash=sha256:d1f6198ee6d9148405e49887803907fe8962a23e6c6f83ea7d98f1c0de375695 \ - --hash=sha256:e124352fd3db36a9d4a21c1aa27fd5d051e621845cb87fb851c08f4f75ce8be6 \ - --hash=sha256:e422abdec8b5fa8462aa016786680720d78bdce7a30c652b7fadf83a4ba35336 \ - --hash=sha256:ef8b72fa70b348724ff1218267e7f7375b8de4e8194d1636ee60510aae104cd0 \ - --hash=sha256:f0c64d1bd842ca2633e74a1a28033d139368ad959872533b1bab8c80e8240a0c \ - --hash=sha256:f24077a3b5298a5a06a8e0536e3ea9ec60e4c7ac486755e5fb6e6ea9b3500106 \ - --hash=sha256:fdd188c8a6ef8769f148f88f859884507b954cc64db6b52f66ef199bb9ad660a \ - --hash=sha256:fe913f20024eb2cb2f323e42a64bdf2911bb9738a15dba7d3cce48151034e3a8 +cryptography==41.0.0 \ + --hash=sha256:0ddaee209d1cf1f180f1efa338a68c4621154de0afaef92b89486f5f96047c55 \ + --hash=sha256:14754bcdae909d66ff24b7b5f166d69340ccc6cb15731670435efd5719294895 \ + --hash=sha256:344c6de9f8bda3c425b3a41b319522ba3208551b70c2ae00099c205f0d9fd3be \ + --hash=sha256:34d405ea69a8b34566ba3dfb0521379b210ea5d560fafedf9f800a9a94a41928 \ 
+ --hash=sha256:3680248309d340fda9611498a5319b0193a8dbdb73586a1acf8109d06f25b92d \ + --hash=sha256:3c5ef25d060c80d6d9f7f9892e1d41bb1c79b78ce74805b8cb4aa373cb7d5ec8 \ + --hash=sha256:4ab14d567f7bbe7f1cdff1c53d5324ed4d3fc8bd17c481b395db224fb405c237 \ + --hash=sha256:5c1f7293c31ebc72163a9a0df246f890d65f66b4a40d9ec80081969ba8c78cc9 \ + --hash=sha256:6b71f64beeea341c9b4f963b48ee3b62d62d57ba93eb120e1196b31dc1025e78 \ + --hash=sha256:7d92f0248d38faa411d17f4107fc0bce0c42cae0b0ba5415505df72d751bf62d \ + --hash=sha256:8362565b3835ceacf4dc8f3b56471a2289cf51ac80946f9087e66dc283a810e0 \ + --hash=sha256:84a165379cb9d411d58ed739e4af3396e544eac190805a54ba2e0322feb55c46 \ + --hash=sha256:88ff107f211ea696455ea8d911389f6d2b276aabf3231bf72c8853d22db755c5 \ + --hash=sha256:9f65e842cb02550fac96536edb1d17f24c0a338fd84eaf582be25926e993dde4 \ + --hash=sha256:a4fc68d1c5b951cfb72dfd54702afdbbf0fb7acdc9b7dc4301bbf2225a27714d \ + --hash=sha256:b7f2f5c525a642cecad24ee8670443ba27ac1fab81bba4cc24c7b6b41f2d0c75 \ + --hash=sha256:b846d59a8d5a9ba87e2c3d757ca019fa576793e8758174d3868aecb88d6fc8eb \ + --hash=sha256:bf8fc66012ca857d62f6a347007e166ed59c0bc150cefa49f28376ebe7d992a2 \ + --hash=sha256:f5d0bf9b252f30a31664b6f64432b4730bb7038339bd18b1fafe129cfc2be9be # via # gcp-releasetool # secretstorage @@ -335,9 +333,9 @@ more-itertools==9.0.0 \ --hash=sha256:250e83d7e81d0c87ca6bd942e6aeab8cc9daa6096d12c5308f3f92fa5e5c1f41 \ --hash=sha256:5a6257e40878ef0520b1803990e3e22303a41b5714006c32a3fd8304b26ea1ab # via jaraco-classes -nox==2022.8.7 \ - --hash=sha256:1b894940551dc5c389f9271d197ca5d655d40bdc6ccf93ed6880e4042760a34b \ - --hash=sha256:96cca88779e08282a699d672258ec01eb7c792d35bbbf538c723172bce23212c +nox==2022.11.21 \ + --hash=sha256:0e41a990e290e274cb205a976c4c97ee3c5234441a8132c8c3fd9ea3c22149eb \ + --hash=sha256:e21c31de0711d1274ca585a2c5fde36b1aa962005ba8e9322bf5eeed16dcd684 # via -r requirements.in packaging==21.3 \ --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \ @@ -380,10 +378,6 @@ protobuf==3.20.3 \ # gcp-docuploader # gcp-releasetool # google-api-core -py==1.11.0 \ - --hash=sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719 \ - --hash=sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378 - # via nox pyasn1==0.4.8 \ --hash=sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d \ --hash=sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba @@ -423,9 +417,9 @@ readme-renderer==37.3 \ --hash=sha256:cd653186dfc73055656f090f227f5cb22a046d7f71a841dfa305f55c9a513273 \ --hash=sha256:f67a16caedfa71eef48a31b39708637a6f4664c4394801a7b0d6432d13907343 # via twine -requests==2.28.1 \ - --hash=sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983 \ - --hash=sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349 +requests==2.31.0 \ + --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \ + --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1 # via # gcp-releasetool # google-api-core diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 882f663e6..b7f666a68 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.17.0" + ".": "2.19.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 78b4d1b29..dc80386a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,49 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## 
[2.19.0](https://github.com/googleapis/python-bigtable/compare/v2.18.1...v2.19.0) (2023-06-08) + + +### Features + +* Add ChangeStreamConfig to CreateTable and UpdateTable ([#786](https://github.com/googleapis/python-bigtable/issues/786)) ([cef70f2](https://github.com/googleapis/python-bigtable/commit/cef70f243541820225f86a520e0b2abd3a7354f7)) + + +### Bug Fixes + +* Add a callback function on flush_rows ([#796](https://github.com/googleapis/python-bigtable/issues/796)) ([589aa5d](https://github.com/googleapis/python-bigtable/commit/589aa5d04f6b5a2bd310d0bf06aeb7058fb6fcd2)) + + +### Documentation + +* **samples:** Add region tags ([#788](https://github.com/googleapis/python-bigtable/issues/788)) ([ecf539c](https://github.com/googleapis/python-bigtable/commit/ecf539c4c976fd9e5505b8abf0b697b218f09fef)) + +## [2.18.1](https://github.com/googleapis/python-bigtable/compare/v2.18.0...v2.18.1) (2023-05-11) + + +### Bug Fixes + +* Revert "Feat: Threaded MutationsBatcher" ([#773](https://github.com/googleapis/python-bigtable/issues/773)) ([a767cff](https://github.com/googleapis/python-bigtable/commit/a767cff95d990994f85f5fd05cc10f952087b49d)) + +## [2.18.0](https://github.com/googleapis/python-bigtable/compare/v2.17.0...v2.18.0) (2023-05-10) + + +### Features + +* Publish RateLimitInfo and FeatureFlag protos ([#768](https://github.com/googleapis/python-bigtable/issues/768)) ([171fea6](https://github.com/googleapis/python-bigtable/commit/171fea6de57a47f92a2a56050f8bfe7518144df7)) +* Threaded MutationsBatcher ([#722](https://github.com/googleapis/python-bigtable/issues/722)) ([7521a61](https://github.com/googleapis/python-bigtable/commit/7521a617c121ead96a21ca47959a53b2db2da090)) + + +### Bug Fixes + +* Pass the "retry" when calling read_rows. ([#759](https://github.com/googleapis/python-bigtable/issues/759)) ([505273b](https://github.com/googleapis/python-bigtable/commit/505273b72bf83d8f92d0e0a92d62f22bce96cc3d)) + + +### Documentation + +* Fix delete from column family example ([#764](https://github.com/googleapis/python-bigtable/issues/764)) ([128b4e1](https://github.com/googleapis/python-bigtable/commit/128b4e1f3eea2dad903d84c8f2933b17a5f0d226)) +* Fix formatting of request arg in docstring ([#756](https://github.com/googleapis/python-bigtable/issues/756)) ([45d3e43](https://github.com/googleapis/python-bigtable/commit/45d3e4308c4f494228c2e6e18a36285c557cb0c3)) + ## [2.17.0](https://github.com/googleapis/python-bigtable/compare/v2.16.0...v2.17.0) (2023-03-01) diff --git a/docs/app-profile.rst b/docs/app-profile.rst index 50e57c179..5c9d426c2 100644 --- a/docs/app-profile.rst +++ b/docs/app-profile.rst @@ -1,6 +1,6 @@ App Profile ~~~~~~~~~~~ -.. automodule:: google.cloud.bigtable.deprecated.app_profile +.. automodule:: google.cloud.bigtable.app_profile :members: :show-inheritance: diff --git a/docs/backup.rst b/docs/backup.rst index 46c32c91b..e75abd431 100644 --- a/docs/backup.rst +++ b/docs/backup.rst @@ -1,6 +1,6 @@ Backup ~~~~~~~~ -.. automodule:: google.cloud.bigtable.deprecated.backup +.. automodule:: google.cloud.bigtable.backup :members: :show-inheritance: diff --git a/docs/batcher.rst b/docs/batcher.rst new file mode 100644 index 000000000..9ac335be1 --- /dev/null +++ b/docs/batcher.rst @@ -0,0 +1,6 @@ +Mutations Batching +~~~~~~~~~~~~~~~~~~ + +.. 
automodule:: google.cloud.bigtable.batcher + :members: + :show-inheritance: diff --git a/docs/client-intro.rst b/docs/client-intro.rst index d75cf5f96..242068499 100644 --- a/docs/client-intro.rst +++ b/docs/client-intro.rst @@ -1,21 +1,21 @@ Base for Everything =================== -To use the API, the :class:`Client ` +To use the API, the :class:`Client ` class defines a high-level interface which handles authorization and creating other objects: .. code:: python - from google.cloud.bigtable.deprecated.client import Client + from google.cloud.bigtable.client import Client client = Client() Long-lived Defaults ------------------- -When creating a :class:`Client `, the +When creating a :class:`Client `, the ``user_agent`` argument has sensible a default -(:data:`DEFAULT_USER_AGENT `). +(:data:`DEFAULT_USER_AGENT `). However, you may over-ride it and the value will be used throughout all API requests made with the ``client`` you create. @@ -38,14 +38,14 @@ Configuration .. code:: - >>> import google.cloud.deprecated as bigtable + >>> from google.cloud import bigtable >>> client = bigtable.Client() or pass in ``credentials`` and ``project`` explicitly .. code:: - >>> import google.cloud.deprecated as bigtable + >>> from google.cloud import bigtable >>> client = bigtable.Client(project='my-project', credentials=creds) .. tip:: @@ -73,15 +73,15 @@ you can pass the ``read_only`` argument: client = bigtable.Client(read_only=True) This will ensure that the -:data:`READ_ONLY_SCOPE ` is used +:data:`READ_ONLY_SCOPE ` is used for API requests (so any accidental requests that would modify data will fail). Next Step --------- -After a :class:`Client `, the next highest-level -object is an :class:`Instance `. You'll need +After a :class:`Client `, the next highest-level +object is an :class:`Instance `. You'll need one before you can interact with tables or data. Head next to learn about the :doc:`instance-api`. diff --git a/docs/client.rst b/docs/client.rst index df92a9861..c48595c8a 100644 --- a/docs/client.rst +++ b/docs/client.rst @@ -1,6 +1,6 @@ Client ~~~~~~ -.. automodule:: google.cloud.bigtable.deprecated.client +.. automodule:: google.cloud.bigtable.client :members: :show-inheritance: diff --git a/docs/cluster.rst b/docs/cluster.rst index 9747b226f..ad33aae5e 100644 --- a/docs/cluster.rst +++ b/docs/cluster.rst @@ -1,6 +1,6 @@ Cluster ~~~~~~~ -.. automodule:: google.cloud.bigtable.deprecated.cluster +.. automodule:: google.cloud.bigtable.cluster :members: :show-inheritance: diff --git a/docs/column-family.rst b/docs/column-family.rst index 39095000d..de6c1eb1f 100644 --- a/docs/column-family.rst +++ b/docs/column-family.rst @@ -2,7 +2,7 @@ Column Families =============== When creating a -:class:`ColumnFamily `, it is +:class:`ColumnFamily `, it is possible to set garbage collection rules for expired data. By setting a rule, cells in the table matching the rule will be deleted @@ -10,19 +10,19 @@ during periodic garbage collection (which executes opportunistically in the background). The types -:class:`MaxAgeGCRule `, -:class:`MaxVersionsGCRule `, -:class:`GarbageCollectionRuleUnion ` and -:class:`GarbageCollectionRuleIntersection ` +:class:`MaxAgeGCRule `, +:class:`MaxVersionsGCRule `, +:class:`GarbageCollectionRuleUnion ` and +:class:`GarbageCollectionRuleIntersection ` can all be used as the optional ``gc_rule`` argument in the -:class:`ColumnFamily ` +:class:`ColumnFamily ` constructor. This value is then used in the -:meth:`create() ` and -:meth:`update() ` methods. 
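The garbage-collection rules described above can be combined and attached when the column family is created. A minimal sketch of that pattern, assuming the ``MaxVersionsGCRule``, ``MaxAgeGCRule`` and ``GCRuleUnion`` helpers exposed by ``google.cloud.bigtable.column_family``; the instance, table and family IDs below are placeholders:

.. code:: python

    import datetime

    from google.cloud.bigtable import Client
    from google.cloud.bigtable import column_family

    client = Client(admin=True)  # admin access is required for schema changes
    table = client.instance("my-instance").table("my-table")

    # Keep at most two versions of each cell, and nothing older than seven days.
    gc_rule = column_family.GCRuleUnion(
        rules=[
            column_family.MaxVersionsGCRule(2),
            column_family.MaxAgeGCRule(datetime.timedelta(days=7)),
        ]
    )

    cf = table.column_family("cf1", gc_rule=gc_rule)
    cf.create()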
+:meth:`create() ` and +:meth:`update() ` methods. These rules can be nested arbitrarily, with a -:class:`MaxAgeGCRule ` or -:class:`MaxVersionsGCRule ` +:class:`MaxAgeGCRule ` or +:class:`MaxVersionsGCRule ` at the lowest level of the nesting: .. code:: python @@ -44,6 +44,6 @@ at the lowest level of the nesting: ---- -.. automodule:: google.cloud.bigtable.deprecated.column_family +.. automodule:: google.cloud.bigtable.column_family :members: :show-inheritance: diff --git a/docs/data-api.rst b/docs/data-api.rst index e68835d1a..01a49178f 100644 --- a/docs/data-api.rst +++ b/docs/data-api.rst @@ -1,7 +1,7 @@ Data API ======== -After creating a :class:`Table ` and some +After creating a :class:`Table ` and some column families, you are ready to store and retrieve data. Cells vs. Columns vs. Column Families @@ -27,7 +27,7 @@ Modifying Data Since data is stored in cells, which are stored in rows, we use the metaphor of a **row** in classes that are used to modify (write, update, delete) data in a -:class:`Table `. +:class:`Table `. Direct vs. Conditional vs. Append --------------------------------- @@ -38,26 +38,26 @@ methods. * The **direct** way is via `MutateRow`_ which involves simply adding, overwriting or deleting cells. The - :class:`DirectRow ` class + :class:`DirectRow ` class handles direct mutations. * The **conditional** way is via `CheckAndMutateRow`_. This method first checks if some filter is matched in a given row, then applies one of two sets of mutations, depending on if a match occurred or not. (These mutation sets are called the "true mutations" and "false mutations".) The - :class:`ConditionalRow ` class + :class:`ConditionalRow ` class handles conditional mutations. * The **append** way is via `ReadModifyWriteRow`_. This simply appends (as bytes) or increments (as an integer) data in a presumed existing cell in a row. The - :class:`AppendRow ` class + :class:`AppendRow ` class handles append mutations. Row Factory ----------- A single factory can be used to create any of the three row types. -To create a :class:`DirectRow `: +To create a :class:`DirectRow `: .. code:: python @@ -66,15 +66,15 @@ To create a :class:`DirectRow `: Unlike the previous string values we've used before, the row key must be ``bytes``. -To create a :class:`ConditionalRow `, -first create a :class:`RowFilter ` and +To create a :class:`ConditionalRow `, +first create a :class:`RowFilter ` and then .. code:: python cond_row = table.row(row_key, filter_=filter_) -To create an :class:`AppendRow ` +To create an :class:`AppendRow ` .. code:: python @@ -95,7 +95,7 @@ Direct Mutations Direct mutations can be added via one of four methods -* :meth:`set_cell() ` allows a +* :meth:`set_cell() ` allows a single value to be written to a column .. code:: python @@ -109,7 +109,7 @@ Direct mutations can be added via one of four methods The value can either be bytes or an integer, which will be converted to bytes as a signed 64-bit integer. -* :meth:`delete_cell() ` deletes +* :meth:`delete_cell() ` deletes all cells (i.e. for all timestamps) in a given column .. code:: python @@ -119,7 +119,7 @@ Direct mutations can be added via one of four methods Remember, this only happens in the ``row`` we are using. If we only want to delete cells from a limited range of time, a - :class:`TimestampRange ` can + :class:`TimestampRange ` can be used .. 
code:: python @@ -127,9 +127,9 @@ Direct mutations can be added via one of four methods row.delete_cell(column_family_id, column, time_range=time_range) -* :meth:`delete_cells() ` does +* :meth:`delete_cells() ` does the same thing as - :meth:`delete_cell() `, + :meth:`delete_cell() `, but accepts a list of columns in a column family rather than a single one. .. code:: python @@ -138,7 +138,7 @@ Direct mutations can be added via one of four methods time_range=time_range) In addition, if we want to delete cells from every column in a column family, - the special :attr:`ALL_COLUMNS ` + the special :attr:`ALL_COLUMNS ` value can be used .. code:: python @@ -146,7 +146,7 @@ Direct mutations can be added via one of four methods row.delete_cells(column_family_id, row.ALL_COLUMNS, time_range=time_range) -* :meth:`delete() ` will delete the +* :meth:`delete() ` will delete the entire row .. code:: python @@ -177,14 +177,14 @@ Append Mutations Append mutations can be added via one of two methods -* :meth:`append_cell_value() ` +* :meth:`append_cell_value() ` appends a bytes value to an existing cell: .. code:: python append_row.append_cell_value(column_family_id, column, bytes_value) -* :meth:`increment_cell_value() ` +* :meth:`increment_cell_value() ` increments an integer value in an existing cell: .. code:: python @@ -217,7 +217,7 @@ Read Single Row from a Table ---------------------------- To make a `ReadRows`_ API request for a single row key, use -:meth:`Table.read_row() `: +:meth:`Table.read_row() `: .. code:: python @@ -226,34 +226,34 @@ To make a `ReadRows`_ API request for a single row key, use { u'fam1': { b'col1': [ - , - , + , + , ], b'col2': [ - , + , ], }, u'fam2': { b'col3': [ - , - , - , + , + , + , ], }, } >>> cell = row_data.cells[u'fam1'][b'col1'][0] >>> cell - + >>> cell.value b'val1' >>> cell.timestamp datetime.datetime(2016, 2, 27, 3, 41, 18, 122823, tzinfo=) -Rather than returning a :class:`DirectRow ` +Rather than returning a :class:`DirectRow ` or similar class, this method returns a -:class:`PartialRowData ` +:class:`PartialRowData ` instance. This class is used for reading and parsing data rather than for -modifying data (as :class:`DirectRow ` is). +modifying data (as :class:`DirectRow ` is). A filter can also be applied to the results: @@ -262,15 +262,15 @@ A filter can also be applied to the results: row_data = table.read_row(row_key, filter_=filter_val) The allowable ``filter_`` values are the same as those used for a -:class:`ConditionalRow `. For +:class:`ConditionalRow `. For more information, see the -:meth:`Table.read_row() ` documentation. +:meth:`Table.read_row() ` documentation. Stream Many Rows from a Table ----------------------------- To make a `ReadRows`_ API request for a stream of rows, use -:meth:`Table.read_rows() `: +:meth:`Table.read_rows() `: .. code:: python @@ -279,32 +279,32 @@ To make a `ReadRows`_ API request for a stream of rows, use Using gRPC over HTTP/2, a continual stream of responses will be delivered. In particular -* :meth:`consume_next() ` +* :meth:`consume_next() ` pulls the next result from the stream, parses it and stores it on the - :class:`PartialRowsData ` instance -* :meth:`consume_all() ` + :class:`PartialRowsData ` instance +* :meth:`consume_all() ` pulls results from the stream until there are no more -* :meth:`cancel() ` closes +* :meth:`cancel() ` closes the stream -See the :class:`PartialRowsData ` +See the :class:`PartialRowsData ` documentation for more information. 
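A condensed sketch of the streaming read described above, assuming placeholder instance, table, column family and row-key values; iterating the returned ``PartialRowsData`` pulls rows from the gRPC stream as they arrive:

.. code:: python

    from google.cloud.bigtable import Client

    client = Client()
    table = client.instance("my-instance").table("my-table")

    rows = table.read_rows(start_key=b"user#0000", end_key=b"user#9999", limit=100)
    for row in rows:
        # row is a PartialRowData; cells are keyed by family, then qualifier.
        cell = row.cells["cf1"][b"col1"][0]
        print(row.row_key, cell.value, cell.timestamp)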
As with -:meth:`Table.read_row() `, an optional +:meth:`Table.read_row() `, an optional ``filter_`` can be applied. In addition a ``start_key`` and / or ``end_key`` can be supplied for the stream, a ``limit`` can be set and a boolean ``allow_row_interleaving`` can be specified to allow faster streamed results at the potential cost of non-sequential reads. -See the :meth:`Table.read_rows() ` +See the :meth:`Table.read_rows() ` documentation for more information on the optional arguments. Sample Keys in a Table ---------------------- Make a `SampleRowKeys`_ API request with -:meth:`Table.sample_row_keys() `: +:meth:`Table.sample_row_keys() `: .. code:: python @@ -315,7 +315,7 @@ approximately equal size, which can be used to break up the data for distributed tasks like mapreduces. As with -:meth:`Table.read_rows() `, the +:meth:`Table.read_rows() `, the returned ``keys_iterator`` is connected to a cancellable HTTP/2 stream. The next key in the result can be accessed via diff --git a/docs/encryption-info.rst b/docs/encryption-info.rst index 62b77ea0c..46f19880f 100644 --- a/docs/encryption-info.rst +++ b/docs/encryption-info.rst @@ -1,6 +1,6 @@ Encryption Info ~~~~~~~~~~~~~~~ -.. automodule:: google.cloud.bigtable.deprecated.encryption_info +.. automodule:: google.cloud.bigtable.encryption_info :members: :show-inheritance: diff --git a/docs/instance-api.rst b/docs/instance-api.rst index 78123e8ca..88b4eb4dc 100644 --- a/docs/instance-api.rst +++ b/docs/instance-api.rst @@ -1,7 +1,7 @@ Instance Admin API ================== -After creating a :class:`Client `, you can +After creating a :class:`Client `, you can interact with individual instances for a project. List Instances @@ -9,7 +9,7 @@ List Instances If you want a comprehensive list of all existing instances, make a `ListInstances`_ API request with -:meth:`Client.list_instances() `: +:meth:`Client.list_instances() `: .. code:: python @@ -18,7 +18,7 @@ If you want a comprehensive list of all existing instances, make a Instance Factory ---------------- -To create an :class:`Instance ` object: +To create an :class:`Instance ` object: .. code:: python @@ -40,7 +40,7 @@ Create a new Instance --------------------- After creating the instance object, make a `CreateInstance`_ API request -with :meth:`create() `: +with :meth:`create() `: .. code:: python @@ -54,14 +54,14 @@ Check on Current Operation When modifying an instance (via a `CreateInstance`_ request), the Bigtable API will return a `long-running operation`_ and a corresponding - :class:`Operation ` object + :class:`Operation ` object will be returned by - :meth:`create() `. + :meth:`create() `. You can check if a long-running operation (for a -:meth:`create() ` has finished +:meth:`create() ` has finished by making a `GetOperation`_ request with -:meth:`Operation.finished() `: +:meth:`Operation.finished() `: .. code:: python @@ -71,18 +71,18 @@ by making a `GetOperation`_ request with .. note:: - Once an :class:`Operation ` object + Once an :class:`Operation ` object has returned :data:`True` from - :meth:`finished() `, the + :meth:`finished() `, the object should not be re-used. Subsequent calls to - :meth:`finished() ` + :meth:`finished() ` will result in a :class:`ValueError `. Get metadata for an existing Instance ------------------------------------- After creating the instance object, make a `GetInstance`_ API request -with :meth:`reload() `: +with :meth:`reload() `: .. 
code:: python @@ -94,7 +94,7 @@ Update an existing Instance --------------------------- After creating the instance object, make an `UpdateInstance`_ API request -with :meth:`update() `: +with :meth:`update() `: .. code:: python @@ -105,7 +105,7 @@ Delete an existing Instance --------------------------- Make a `DeleteInstance`_ API request with -:meth:`delete() `: +:meth:`delete() `: .. code:: python @@ -115,8 +115,8 @@ Next Step --------- Now we go down the hierarchy from -:class:`Instance ` to a -:class:`Table `. +:class:`Instance ` to a +:class:`Table `. Head next to learn about the :doc:`table-api`. diff --git a/docs/instance.rst b/docs/instance.rst index 3a61faf1c..f9be9672f 100644 --- a/docs/instance.rst +++ b/docs/instance.rst @@ -1,6 +1,6 @@ Instance ~~~~~~~~ -.. automodule:: google.cloud.bigtable.deprecated.instance +.. automodule:: google.cloud.bigtable.instance :members: :show-inheritance: diff --git a/docs/row-data.rst b/docs/row-data.rst index b9013ebf5..503f9b1cb 100644 --- a/docs/row-data.rst +++ b/docs/row-data.rst @@ -1,6 +1,6 @@ Row Data ~~~~~~~~ -.. automodule:: google.cloud.bigtable.deprecated.row_data +.. automodule:: google.cloud.bigtable.row_data :members: :show-inheritance: diff --git a/docs/row-filters.rst b/docs/row-filters.rst index 8d1fac46b..9884ce400 100644 --- a/docs/row-filters.rst +++ b/docs/row-filters.rst @@ -2,11 +2,11 @@ Bigtable Row Filters ==================== It is possible to use a -:class:`RowFilter ` +:class:`RowFilter ` when adding mutations to a -:class:`ConditionalRow ` and when -reading row data with :meth:`read_row() ` -or :meth:`read_rows() `. +:class:`ConditionalRow ` and when +reading row data with :meth:`read_row() ` +or :meth:`read_rows() `. As laid out in the `RowFilter definition`_, the following basic filters are provided: @@ -60,8 +60,8 @@ level. For example: ---- -.. automodule:: google.cloud.bigtable.deprecated.row_filters +.. automodule:: google.cloud.bigtable.row_filters :members: :show-inheritance: -.. _RowFilter definition: https://googleapis.dev/python/bigtable/latest/row-filters.html?highlight=rowfilter#google.cloud.bigtable.deprecated.row_filters.RowFilter +.. _RowFilter definition: https://googleapis.dev/python/bigtable/latest/row-filters.html?highlight=rowfilter#google.cloud.bigtable.row_filters.RowFilter diff --git a/docs/row-set.rst b/docs/row-set.rst index 92cd107e8..5f7a16a02 100644 --- a/docs/row-set.rst +++ b/docs/row-set.rst @@ -1,6 +1,6 @@ Row Set ~~~~~~~~ -.. automodule:: google.cloud.bigtable.deprecated.row_set +.. automodule:: google.cloud.bigtable.row_set :members: :show-inheritance: diff --git a/docs/row.rst b/docs/row.rst index e8fa48cdd..33686608b 100644 --- a/docs/row.rst +++ b/docs/row.rst @@ -1,7 +1,7 @@ Bigtable Row ============ -.. automodule:: google.cloud.bigtable.deprecated.row +.. automodule:: google.cloud.bigtable.row :members: :show-inheritance: :inherited-members: diff --git a/docs/snippets.py b/docs/snippets.py index 084f10270..1d93fdf12 100644 --- a/docs/snippets.py +++ b/docs/snippets.py @@ -16,7 +16,7 @@ """Testable usage examples for Google Cloud Bigtable API wrapper Each example function takes a ``client`` argument (which must be an instance -of :class:`google.cloud.bigtable.deprecated.client.Client`) and uses it to perform a task +of :class:`google.cloud.bigtable.client.Client`) and uses it to perform a task with the API. 
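The documentation and snippet changes in this patch consistently swap the ``google.cloud.bigtable.deprecated`` import path for the top-level package. A hedged sketch of what the updated imports look like in calling code; the instance ID below is a placeholder:

.. code:: python

    # Previously these docs imported from:
    #     from google.cloud.bigtable.deprecated import Client
    # After this change the same classes come from the top-level package:
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import column_family
    from google.cloud.bigtable import enums

    client = Client(admin=True)
    instance = client.instance("my-instance")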
To facilitate running the examples as system tests, each example is also passed @@ -40,8 +40,8 @@ from test_utils.retry import RetryErrors from google.cloud._helpers import UTC -from google.cloud.bigtable.deprecated import Client -from google.cloud.bigtable.deprecated import enums +from google.cloud.bigtable import Client +from google.cloud.bigtable import enums UNIQUE_SUFFIX = unique_resource_id("-") @@ -110,8 +110,8 @@ def teardown_module(): def test_bigtable_create_instance(): # [START bigtable_api_create_prod_instance] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated import enums + from google.cloud.bigtable import Client + from google.cloud.bigtable import enums my_instance_id = "inst-my-" + UNIQUE_SUFFIX my_cluster_id = "clus-my-" + UNIQUE_SUFFIX @@ -144,8 +144,8 @@ def test_bigtable_create_instance(): def test_bigtable_create_additional_cluster(): # [START bigtable_api_create_cluster] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated import enums + from google.cloud.bigtable import Client + from google.cloud.bigtable import enums # Assuming that there is an existing instance with `INSTANCE_ID` # on the server already. @@ -181,8 +181,8 @@ def test_bigtable_create_reload_delete_app_profile(): import re # [START bigtable_api_create_app_profile] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated import enums + from google.cloud.bigtable import Client + from google.cloud.bigtable import enums routing_policy_type = enums.RoutingPolicyType.ANY @@ -202,7 +202,7 @@ def test_bigtable_create_reload_delete_app_profile(): # [END bigtable_api_create_app_profile] # [START bigtable_api_app_profile_name] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -219,7 +219,7 @@ def test_bigtable_create_reload_delete_app_profile(): assert _profile_name_re.match(app_profile_name) # [START bigtable_api_app_profile_exists] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -230,7 +230,7 @@ def test_bigtable_create_reload_delete_app_profile(): assert app_profile_exists # [START bigtable_api_reload_app_profile] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -241,7 +241,7 @@ def test_bigtable_create_reload_delete_app_profile(): assert app_profile.routing_policy_type == ROUTING_POLICY_TYPE # [START bigtable_api_update_app_profile] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -255,7 +255,7 @@ def test_bigtable_create_reload_delete_app_profile(): assert app_profile.description == description # [START bigtable_api_delete_app_profile] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -269,7 +269,7 @@ def test_bigtable_create_reload_delete_app_profile(): def test_bigtable_list_instances(): # [START bigtable_api_list_instances] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) (instances_list, failed_locations_list) = 
client.list_instances() @@ -280,7 +280,7 @@ def test_bigtable_list_instances(): def test_bigtable_list_clusters_on_instance(): # [START bigtable_api_list_clusters_on_instance] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -292,7 +292,7 @@ def test_bigtable_list_clusters_on_instance(): def test_bigtable_list_clusters_in_project(): # [START bigtable_api_list_clusters_in_project] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) (clusters_list, failed_locations_list) = client.list_clusters() @@ -309,7 +309,7 @@ def test_bigtable_list_app_profiles(): app_profile = app_profile.create(ignore_warnings=True) # [START bigtable_api_list_app_profiles] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -325,7 +325,7 @@ def test_bigtable_list_app_profiles(): def test_bigtable_instance_exists(): # [START bigtable_api_check_instance_exists] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -337,7 +337,7 @@ def test_bigtable_instance_exists(): def test_bigtable_cluster_exists(): # [START bigtable_api_check_cluster_exists] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -350,7 +350,7 @@ def test_bigtable_cluster_exists(): def test_bigtable_reload_instance(): # [START bigtable_api_reload_instance] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -362,7 +362,7 @@ def test_bigtable_reload_instance(): def test_bigtable_reload_cluster(): # [START bigtable_api_reload_cluster] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -375,7 +375,7 @@ def test_bigtable_reload_cluster(): def test_bigtable_update_instance(): # [START bigtable_api_update_instance] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -389,7 +389,7 @@ def test_bigtable_update_instance(): def test_bigtable_update_cluster(): # [START bigtable_api_update_cluster] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -403,7 +403,7 @@ def test_bigtable_update_cluster(): def test_bigtable_cluster_disable_autoscaling(): # [START bigtable_api_cluster_disable_autoscaling] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -424,8 +424,8 @@ def test_bigtable_create_table(): # [START bigtable_api_create_table] from google.api_core import exceptions from google.api_core import retry - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated import column_family + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family client = Client(admin=True) instance = 
client.instance(INSTANCE_ID) @@ -450,7 +450,7 @@ def test_bigtable_create_table(): def test_bigtable_list_tables(): # [START bigtable_api_list_tables] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -463,7 +463,7 @@ def test_bigtable_list_tables(): def test_bigtable_delete_cluster(): - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -480,7 +480,7 @@ def test_bigtable_delete_cluster(): operation.result(timeout=1000) # [START bigtable_api_delete_cluster] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -493,7 +493,7 @@ def test_bigtable_delete_cluster(): def test_bigtable_delete_instance(): - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) @@ -515,7 +515,7 @@ def test_bigtable_delete_instance(): INSTANCES_TO_DELETE.append(instance) # [START bigtable_api_delete_instance] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) @@ -531,7 +531,7 @@ def test_bigtable_delete_instance(): def test_bigtable_test_iam_permissions(): # [START bigtable_api_test_iam_permissions] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -547,9 +547,9 @@ def test_bigtable_set_iam_policy_then_get_iam_policy(): service_account_email = Config.CLIENT._credentials.service_account_email # [START bigtable_api_set_iam_policy] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated.policy import Policy - from google.cloud.bigtable.deprecated.policy import BIGTABLE_ADMIN_ROLE + from google.cloud.bigtable import Client + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -563,7 +563,7 @@ def test_bigtable_set_iam_policy_then_get_iam_policy(): assert len(policy_latest.bigtable_admins) > 0 # [START bigtable_api_get_iam_policy] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -577,7 +577,7 @@ def test_bigtable_project_path(): import re # [START bigtable_api_project_path] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) project_path = client.project_path @@ -586,7 +586,7 @@ def test_bigtable_project_path(): def test_bigtable_table_data_client(): # [START bigtable_api_table_data_client] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) table_data_client = client.table_data_client @@ -595,7 +595,7 @@ def test_bigtable_table_data_client(): def test_bigtable_table_admin_client(): # [START bigtable_api_table_admin_client] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) table_admin_client = client.table_admin_client @@ -604,7 +604,7 @@ def test_bigtable_table_admin_client(): def test_bigtable_instance_admin_client(): # 
[START bigtable_api_instance_admin_client] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance_admin_client = client.instance_admin_client @@ -615,9 +615,9 @@ def test_bigtable_admins_policy(): service_account_email = Config.CLIENT._credentials.service_account_email # [START bigtable_api_admins_policy] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated.policy import Policy - from google.cloud.bigtable.deprecated.policy import BIGTABLE_ADMIN_ROLE + from google.cloud.bigtable import Client + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -636,9 +636,9 @@ def test_bigtable_readers_policy(): service_account_email = Config.CLIENT._credentials.service_account_email # [START bigtable_api_readers_policy] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated.policy import Policy - from google.cloud.bigtable.deprecated.policy import BIGTABLE_READER_ROLE + from google.cloud.bigtable import Client + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_READER_ROLE client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -657,9 +657,9 @@ def test_bigtable_users_policy(): service_account_email = Config.CLIENT._credentials.service_account_email # [START bigtable_api_users_policy] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated.policy import Policy - from google.cloud.bigtable.deprecated.policy import BIGTABLE_USER_ROLE + from google.cloud.bigtable import Client + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_USER_ROLE client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -678,9 +678,9 @@ def test_bigtable_viewers_policy(): service_account_email = Config.CLIENT._credentials.service_account_email # [START bigtable_api_viewers_policy] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated.policy import Policy - from google.cloud.bigtable.deprecated.policy import BIGTABLE_VIEWER_ROLE + from google.cloud.bigtable import Client + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_VIEWER_ROLE client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -699,7 +699,7 @@ def test_bigtable_instance_name(): import re # [START bigtable_api_instance_name] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -711,7 +711,7 @@ def test_bigtable_cluster_name(): import re # [START bigtable_api_cluster_name] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -722,7 +722,7 @@ def test_bigtable_cluster_name(): def test_bigtable_instance_from_pb(): # [START bigtable_api_instance_from_pb] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 client = Client(admin=True) @@ -741,7 +741,7 @@ def test_bigtable_instance_from_pb(): def test_bigtable_cluster_from_pb(): # [START bigtable_api_cluster_from_pb] - from 
google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 client = Client(admin=True) @@ -767,7 +767,7 @@ def test_bigtable_cluster_from_pb(): def test_bigtable_instance_state(): # [START bigtable_api_instance_state] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -779,7 +779,7 @@ def test_bigtable_instance_state(): def test_bigtable_cluster_state(): # [START bigtable_api_cluster_state] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) diff --git a/docs/snippets_table.py b/docs/snippets_table.py index 72c342907..f27260425 100644 --- a/docs/snippets_table.py +++ b/docs/snippets_table.py @@ -16,7 +16,7 @@ """Testable usage examples for Google Cloud Bigtable API wrapper Each example function takes a ``client`` argument (which must be an instance -of :class:`google.cloud.bigtable.deprecated.client.Client`) and uses it to perform a task +of :class:`google.cloud.bigtable.client.Client`) and uses it to perform a task with the API. To facilitate running the examples as system tests, each example is also passed @@ -38,9 +38,9 @@ from test_utils.retry import RetryErrors from google.cloud._helpers import UTC -from google.cloud.bigtable.deprecated import Client -from google.cloud.bigtable.deprecated import enums -from google.cloud.bigtable.deprecated import column_family +from google.cloud.bigtable import Client +from google.cloud.bigtable import enums +from google.cloud.bigtable import column_family INSTANCE_ID = "snippet" + unique_resource_id("-") @@ -113,8 +113,8 @@ def teardown_module(): def test_bigtable_create_table(): # [START bigtable_api_create_table] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated import column_family + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -143,7 +143,7 @@ def test_bigtable_sample_row_keys(): assert table_sample.exists() # [START bigtable_api_sample_row_keys] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -159,7 +159,7 @@ def test_bigtable_sample_row_keys(): def test_bigtable_write_read_drop_truncate(): # [START bigtable_api_mutate_rows] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -190,7 +190,7 @@ def test_bigtable_write_read_drop_truncate(): # [END bigtable_api_mutate_rows] assert len(response) == len(rows) # [START bigtable_api_read_row] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -200,7 +200,7 @@ def test_bigtable_write_read_drop_truncate(): # [END bigtable_api_read_row] assert row.row_key.decode("utf-8") == row_key # [START bigtable_api_read_rows] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -218,7 +218,7 @@ def test_bigtable_write_read_drop_truncate(): # [END bigtable_api_read_rows] 
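The surrounding snippet exercises a full write/read/drop/truncate cycle. A condensed, hedged sketch of that flow with placeholder IDs and error handling omitted:

.. code:: python

    from google.cloud.bigtable import Client

    client = Client(admin=True)
    table = client.instance("my-instance").table("my-table")

    # Batch-write two rows with a single mutate_rows call.
    rows = []
    for key in (b"row_key_1", b"row_key_2"):
        row = table.direct_row(key)
        row.set_cell("cf1", b"field1", b"some-value")
        rows.append(row)
    statuses = table.mutate_rows(rows)
    assert all(status.code == 0 for status in statuses)

    # Drop every row under a key prefix, then empty the table entirely.
    table.drop_by_prefix(b"row_key", timeout=200)
    table.truncate(timeout=200)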
assert len(total_rows) == len(rows) # [START bigtable_api_drop_by_prefix] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -231,7 +231,7 @@ def test_bigtable_write_read_drop_truncate(): assert row.row_key.decode("utf-8") not in dropped_row_keys # [START bigtable_api_truncate_table] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -246,7 +246,7 @@ def test_bigtable_write_read_drop_truncate(): def test_bigtable_mutations_batcher(): # [START bigtable_api_mutations_batcher] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -297,7 +297,7 @@ def test_bigtable_mutations_batcher(): def test_bigtable_table_column_family(): # [START bigtable_api_table_column_family] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -311,7 +311,7 @@ def test_bigtable_table_column_family(): def test_bigtable_list_tables(): # [START bigtable_api_list_tables] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -324,7 +324,7 @@ def test_bigtable_table_name(): import re # [START bigtable_api_table_name] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -342,7 +342,7 @@ def test_bigtable_table_name(): def test_bigtable_list_column_families(): # [START bigtable_api_list_column_families] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -356,7 +356,7 @@ def test_bigtable_list_column_families(): def test_bigtable_get_cluster_states(): # [START bigtable_api_get_cluster_states] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -374,7 +374,7 @@ def test_bigtable_table_test_iam_permissions(): assert table_policy.exists # [START bigtable_api_table_test_iam_permissions] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -392,9 +392,9 @@ def test_bigtable_table_set_iam_policy_then_get_iam_policy(): service_account_email = Config.CLIENT._credentials.service_account_email # [START bigtable_api_table_set_iam_policy] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated.policy import Policy - from google.cloud.bigtable.deprecated.policy import BIGTABLE_ADMIN_ROLE + from google.cloud.bigtable import Client + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -407,7 +407,7 @@ def test_bigtable_table_set_iam_policy_then_get_iam_policy(): assert len(policy_latest.bigtable_admins) > 0 # [START bigtable_api_table_get_iam_policy] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = 
Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -419,7 +419,7 @@ def test_bigtable_table_set_iam_policy_then_get_iam_policy(): def test_bigtable_table_exists(): # [START bigtable_api_check_table_exists] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -435,7 +435,7 @@ def test_bigtable_delete_table(): assert table_del.exists() # [START bigtable_api_delete_table] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -448,7 +448,7 @@ def test_bigtable_delete_table(): def test_bigtable_table_row(): # [START bigtable_api_table_row] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -475,7 +475,7 @@ def test_bigtable_table_row(): def test_bigtable_table_append_row(): # [START bigtable_api_table_append_row] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -502,7 +502,7 @@ def test_bigtable_table_append_row(): def test_bigtable_table_direct_row(): # [START bigtable_api_table_direct_row] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -529,8 +529,8 @@ def test_bigtable_table_direct_row(): def test_bigtable_table_conditional_row(): # [START bigtable_api_table_conditional_row] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated.row_filters import PassAllFilter + from google.cloud.bigtable import Client + from google.cloud.bigtable.row_filters import PassAllFilter client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -558,7 +558,7 @@ def test_bigtable_table_conditional_row(): def test_bigtable_column_family_name(): # [START bigtable_api_column_family_name] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -581,8 +581,8 @@ def test_bigtable_column_family_name(): def test_bigtable_create_update_delete_column_family(): # [START bigtable_api_create_column_family] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated import column_family + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -598,8 +598,8 @@ def test_bigtable_create_update_delete_column_family(): assert column_families[column_family_id].gc_rule == gc_rule # [START bigtable_api_update_column_family] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated import column_family + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -617,8 +617,8 @@ def test_bigtable_create_update_delete_column_family(): assert updated_families[column_family_id].gc_rule == max_age_rule # [START bigtable_api_delete_column_family] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated import column_family + from google.cloud.bigtable import Client + from 
google.cloud.bigtable import column_family client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -653,8 +653,8 @@ def test_bigtable_add_row_add_row_range_add_row_range_from_keys(): Config.TABLE.mutate_rows(rows) # [START bigtable_api_add_row_key] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated.row_set import RowSet + from google.cloud.bigtable import Client + from google.cloud.bigtable.row_set import RowSet client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -670,9 +670,9 @@ def test_bigtable_add_row_add_row_range_add_row_range_from_keys(): assert found_row_keys == expected_row_keys # [START bigtable_api_add_row_range] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated.row_set import RowSet - from google.cloud.bigtable.deprecated.row_set import RowRange + from google.cloud.bigtable import Client + from google.cloud.bigtable.row_set import RowSet + from google.cloud.bigtable.row_set import RowRange client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -688,8 +688,8 @@ def test_bigtable_add_row_add_row_range_add_row_range_from_keys(): assert found_row_keys == expected_row_keys # [START bigtable_api_row_range_from_keys] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated.row_set import RowSet + from google.cloud.bigtable import Client + from google.cloud.bigtable.row_set import RowSet client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -723,8 +723,8 @@ def test_bigtable_add_row_range_with_prefix(): Config.TABLE.mutate_rows(rows) # [START bigtable_api_add_row_range_with_prefix] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated.row_set import RowSet + from google.cloud.bigtable import Client + from google.cloud.bigtable.row_set import RowSet client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -747,7 +747,7 @@ def test_bigtable_add_row_range_with_prefix(): def test_bigtable_batcher_mutate_flush_mutate_rows(): # [START bigtable_api_batcher_mutate] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -769,7 +769,7 @@ def test_bigtable_batcher_mutate_flush_mutate_rows(): # [END bigtable_api_batcher_mutate] # [START bigtable_api_batcher_flush] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -795,7 +795,7 @@ def test_bigtable_batcher_mutate_flush_mutate_rows(): table.truncate(timeout=200) # [START bigtable_api_batcher_mutate_rows] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -829,8 +829,8 @@ def test_bigtable_batcher_mutate_flush_mutate_rows(): def test_bigtable_create_family_gc_max_age(): # [START bigtable_api_create_family_gc_max_age] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated import column_family + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -851,8 +851,8 @@ def test_bigtable_create_family_gc_max_age(): def test_bigtable_create_family_gc_max_versions(): # [START bigtable_api_create_family_gc_max_versions] - from 
google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated import column_family + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -872,8 +872,8 @@ def test_bigtable_create_family_gc_max_versions(): def test_bigtable_create_family_gc_union(): # [START bigtable_api_create_family_gc_union] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated import column_family + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -898,8 +898,8 @@ def test_bigtable_create_family_gc_union(): def test_bigtable_create_family_gc_intersection(): # [START bigtable_api_create_family_gc_intersection] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated import column_family + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -927,8 +927,8 @@ def test_bigtable_create_family_gc_intersection(): def test_bigtable_create_family_gc_nested(): # [START bigtable_api_create_family_gc_nested] - from google.cloud.bigtable.deprecated import Client - from google.cloud.bigtable.deprecated import column_family + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -978,7 +978,7 @@ def test_bigtable_row_data_cells_cell_value_cell_values(): row.commit() # [START bigtable_api_row_data_cells] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -993,7 +993,7 @@ def test_bigtable_row_data_cells_cell_value_cell_values(): assert actual_cell_value == value # [START bigtable_api_row_cell_value] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -1006,7 +1006,7 @@ def test_bigtable_row_data_cells_cell_value_cell_values(): assert cell_value == value # [START bigtable_api_row_cell_values] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -1025,7 +1025,7 @@ def test_bigtable_row_data_cells_cell_value_cell_values(): row.commit() # [START bigtable_api_row_find_cells] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -1042,7 +1042,7 @@ def test_bigtable_row_data_cells_cell_value_cell_values(): def test_bigtable_row_setcell_rowkey(): # [START bigtable_api_row_set_cell] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -1061,7 +1061,7 @@ def test_bigtable_row_setcell_rowkey(): assert status.code == 0 # [START bigtable_api_row_row_key] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -1073,7 +1073,7 @@ def test_bigtable_row_setcell_rowkey(): assert row_key == ROW_KEY1 # [START bigtable_api_row_table] - from 
google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -1098,7 +1098,7 @@ def test_bigtable_row_delete(): assert written_row_keys == [b"row_key_1"] # [START bigtable_api_row_delete] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -1130,7 +1130,7 @@ def test_bigtable_row_delete_cell(): assert written_row_keys == [row_key1] # [START bigtable_api_row_delete_cell] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -1163,7 +1163,7 @@ def test_bigtable_row_delete_cells(): assert written_row_keys == [row_key1] # [START bigtable_api_row_delete_cells] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -1189,7 +1189,7 @@ def test_bigtable_row_clear(): assert mutation_size > 0 # [START bigtable_api_row_clear] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -1208,7 +1208,7 @@ def test_bigtable_row_clear(): def test_bigtable_row_clear_get_mutations_size(): # [START bigtable_api_row_get_mutations_size] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -1230,7 +1230,7 @@ def test_bigtable_row_clear_get_mutations_size(): def test_bigtable_row_setcell_commit_rowkey(): # [START bigtable_api_row_set_cell] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -1244,7 +1244,7 @@ def test_bigtable_row_setcell_commit_rowkey(): row_obj.commit() # [START bigtable_api_row_commit] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -1264,7 +1264,7 @@ def test_bigtable_row_setcell_commit_rowkey(): assert written_row_keys == [b"row_key_1", b"row_key_2"] # [START bigtable_api_row_row_key] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -1286,7 +1286,7 @@ def test_bigtable_row_append_cell_value(): row.commit() # [START bigtable_api_row_append_cell_value] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -1303,7 +1303,7 @@ def test_bigtable_row_append_cell_value(): assert actual_value == cell_val1 + cell_val2 # [START bigtable_api_row_commit] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -1315,7 +1315,7 @@ def test_bigtable_row_append_cell_value(): # [END bigtable_api_row_commit] # [START bigtable_api_row_increment_cell_value] - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) diff --git a/docs/table-api.rst b/docs/table-api.rst index 
ce05a3419..1bbf85146 100644 --- a/docs/table-api.rst +++ b/docs/table-api.rst @@ -1,7 +1,7 @@ Table Admin API =============== -After creating an :class:`Instance `, you can +After creating an :class:`Instance `, you can interact with individual tables, groups of tables or column families within a table. @@ -10,33 +10,33 @@ List Tables If you want a comprehensive list of all existing tables in a instance, make a `ListTables`_ API request with -:meth:`Instance.list_tables() `: +:meth:`Instance.list_tables() `: .. code:: python >>> instance.list_tables() - [, - ] + [, + ] Table Factory ------------- -To create a :class:`Table ` object: +To create a :class:`Table ` object: .. code:: python table = instance.table(table_id) -Even if this :class:`Table ` already +Even if this :class:`Table ` already has been created with the API, you'll want this object to use as a -parent of a :class:`ColumnFamily ` -or :class:`Row `. +parent of a :class:`ColumnFamily ` +or :class:`Row `. Create a new Table ------------------ After creating the table object, make a `CreateTable`_ API request -with :meth:`create() `: +with :meth:`create() `: .. code:: python @@ -53,7 +53,7 @@ Delete an existing Table ------------------------ Make a `DeleteTable`_ API request with -:meth:`delete() `: +:meth:`delete() `: .. code:: python @@ -67,7 +67,7 @@ associated with a table, the `GetTable`_ API method returns a table object with the names of the column families. To retrieve the list of column families use -:meth:`list_column_families() `: +:meth:`list_column_families() `: .. code:: python @@ -77,7 +77,7 @@ Column Family Factory --------------------- To create a -:class:`ColumnFamily ` object: +:class:`ColumnFamily ` object: .. code:: python @@ -87,7 +87,7 @@ There is no real reason to use this factory unless you intend to create or delete a column family. In addition, you can specify an optional ``gc_rule`` (a -:class:`GarbageCollectionRule ` +:class:`GarbageCollectionRule ` or similar): .. code:: python @@ -99,7 +99,7 @@ This rule helps the backend determine when and how to clean up old cells in the column family. See :doc:`column-family` for more information about -:class:`GarbageCollectionRule ` +:class:`GarbageCollectionRule ` and related classes. Create a new Column Family @@ -107,7 +107,7 @@ Create a new Column Family After creating the column family object, make a `CreateColumnFamily`_ API request with -:meth:`ColumnFamily.create() ` +:meth:`ColumnFamily.create() ` .. code:: python @@ -117,7 +117,7 @@ Delete an existing Column Family -------------------------------- Make a `DeleteColumnFamily`_ API request with -:meth:`ColumnFamily.delete() ` +:meth:`ColumnFamily.delete() ` .. code:: python @@ -127,7 +127,7 @@ Update an existing Column Family -------------------------------- Make an `UpdateColumnFamily`_ API request with -:meth:`ColumnFamily.delete() ` +:meth:`ColumnFamily.delete() ` .. code:: python @@ -137,9 +137,9 @@ Next Step --------- Now we go down the final step of the hierarchy from -:class:`Table ` to -:class:`Row ` as well as streaming -data directly via a :class:`Table `. +:class:`Table ` to +:class:`Row ` as well as streaming +data directly via a :class:`Table `. Head next to learn about the :doc:`data-api`. diff --git a/docs/table.rst b/docs/table.rst index 0d938e0af..c230725d1 100644 --- a/docs/table.rst +++ b/docs/table.rst @@ -1,6 +1,6 @@ Table ~~~~~ -.. automodule:: google.cloud.bigtable.deprecated.table +.. 
automodule:: google.cloud.bigtable.table :members: :show-inheritance: diff --git a/docs/usage.rst b/docs/usage.rst index 80fb65898..73a32b039 100644 --- a/docs/usage.rst +++ b/docs/usage.rst @@ -17,16 +17,17 @@ Using the API row-data row-filters row-set + batcher In the hierarchy of API concepts -* a :class:`Client ` owns an - :class:`Instance ` -* an :class:`Instance ` owns a - :class:`Table ` -* a :class:`Table ` owns a - :class:`ColumnFamily ` -* a :class:`Table ` owns a - :class:`Row ` +* a :class:`Client ` owns an + :class:`Instance ` +* an :class:`Instance ` owns a + :class:`Table ` +* a :class:`Table ` owns a + :class:`ColumnFamily ` +* a :class:`Table ` owns a + :class:`Row ` (and all the cells in the row) diff --git a/google/cloud/bigtable/__init__.py b/google/cloud/bigtable/__init__.py index 06b45bc4d..7331ff241 100644 --- a/google/cloud/bigtable/__init__.py +++ b/google/cloud/bigtable/__init__.py @@ -1,5 +1,4 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,48 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# - -from typing import List, Tuple -from google.cloud.bigtable import gapic_version as package_version +"""Google Cloud Bigtable API package.""" -from google.cloud.bigtable.client import BigtableDataClient -from google.cloud.bigtable.client import Table +from google.cloud.bigtable.client import Client -from google.cloud.bigtable.read_rows_query import ReadRowsQuery -from google.cloud.bigtable.read_rows_query import RowRange -from google.cloud.bigtable.row import Row -from google.cloud.bigtable.row import Cell - -from google.cloud.bigtable.mutations_batcher import MutationsBatcher -from google.cloud.bigtable.mutations import Mutation -from google.cloud.bigtable.mutations import RowMutationEntry -from google.cloud.bigtable.mutations import SetCell -from google.cloud.bigtable.mutations import DeleteRangeFromColumn -from google.cloud.bigtable.mutations import DeleteAllFromFamily -from google.cloud.bigtable.mutations import DeleteAllFromRow +from google.cloud.bigtable import gapic_version as package_version -# Type alias for the output of sample_keys -RowKeySamples = List[Tuple[bytes, int]] -# type alias for the output of query.shard() -ShardedQuery = List[ReadRowsQuery] +__version__: str -__version__: str = package_version.__version__ +__version__ = package_version.__version__ -__all__ = ( - "BigtableDataClient", - "Table", - "RowKeySamples", - "ReadRowsQuery", - "RowRange", - "MutationsBatcher", - "Mutation", - "RowMutationEntry", - "SetCell", - "DeleteRangeFromColumn", - "DeleteAllFromFamily", - "DeleteAllFromRow", - "Row", - "Cell", -) +__all__ = ["__version__", "Client"] diff --git a/google/cloud/bigtable/deprecated/app_profile.py b/google/cloud/bigtable/app_profile.py similarity index 97% rename from google/cloud/bigtable/deprecated/app_profile.py rename to google/cloud/bigtable/app_profile.py index a5c3df356..8cde66146 100644 --- a/google/cloud/bigtable/deprecated/app_profile.py +++ b/google/cloud/bigtable/app_profile.py @@ -17,7 +17,7 @@ import re -from google.cloud.bigtable.deprecated.enums import RoutingPolicyType +from google.cloud.bigtable.enums import RoutingPolicyType from google.cloud.bigtable_admin_v2.types import instance from google.protobuf import 
field_mask_pb2 from google.api_core.exceptions import NotFound @@ -47,8 +47,8 @@ class AppProfile(object): :param: routing_policy_type: (Optional) The type of the routing policy. Possible values are represented by the following constants: - :data:`google.cloud.bigtable.deprecated.enums.RoutingPolicyType.ANY` - :data:`google.cloud.bigtable.deprecated.enums.RoutingPolicyType.SINGLE` + :data:`google.cloud.bigtable.enums.RoutingPolicyType.ANY` + :data:`google.cloud.bigtable.enums.RoutingPolicyType.SINGLE` :type: description: str :param: description: (Optional) Long form description of the use @@ -148,7 +148,7 @@ def from_pb(cls, app_profile_pb, instance): :type app_profile_pb: :class:`instance.app_profile_pb` :param app_profile_pb: An instance protobuf object. - :type instance: :class:`google.cloud.bigtable.deprecated.instance.Instance` + :type instance: :class:`google.cloud.bigtable.instance.Instance` :param instance: The instance that owns the cluster. :rtype: :class:`AppProfile` diff --git a/google/cloud/bigtable/deprecated/backup.py b/google/cloud/bigtable/backup.py similarity index 96% rename from google/cloud/bigtable/deprecated/backup.py rename to google/cloud/bigtable/backup.py index fc15318bc..6986d730a 100644 --- a/google/cloud/bigtable/deprecated/backup.py +++ b/google/cloud/bigtable/backup.py @@ -19,8 +19,8 @@ from google.cloud._helpers import _datetime_to_pb_timestamp # type: ignore from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient from google.cloud.bigtable_admin_v2.types import table -from google.cloud.bigtable.deprecated.encryption_info import EncryptionInfo -from google.cloud.bigtable.deprecated.policy import Policy +from google.cloud.bigtable.encryption_info import EncryptionInfo +from google.cloud.bigtable.policy import Policy from google.cloud.exceptions import NotFound # type: ignore from google.protobuf import field_mask_pb2 @@ -50,7 +50,7 @@ class Backup(object): :type backup_id: str :param backup_id: The ID of the backup. - :type instance: :class:`~google.cloud.bigtable.deprecated.instance.Instance` + :type instance: :class:`~google.cloud.bigtable.instance.Instance` :param instance: The Instance that owns this Backup. :type cluster_id: str @@ -188,7 +188,7 @@ def expire_time(self, new_expire_time): def encryption_info(self): """Encryption info for this Backup. - :rtype: :class:`google.cloud.bigtable.deprecated.encryption.EncryptionInfo` + :rtype: :class:`google.cloud.bigtable.encryption.EncryptionInfo` :returns: The encryption information for this backup. """ return self._encryption_info @@ -238,10 +238,10 @@ def from_pb(cls, backup_pb, instance): :type backup_pb: :class:`table.Backup` :param backup_pb: A Backup protobuf object. - :type instance: :class:`Instance ` + :type instance: :class:`Instance ` :param instance: The Instance that owns the Backup. - :rtype: :class:`~google.cloud.bigtable.deprecated.backup.Backup` + :rtype: :class:`~google.cloud.bigtable.backup.Backup` :returns: The backup parsed from the protobuf response. :raises: ValueError: If the backup name does not match the expected format or the parsed project ID does not match the @@ -440,7 +440,7 @@ def restore(self, table_id, instance_id=None): def get_iam_policy(self): """Gets the IAM access control policy for this backup. - :rtype: :class:`google.cloud.bigtable.deprecated.policy.Policy` + :rtype: :class:`google.cloud.bigtable.policy.Policy` :returns: The current IAM policy of this backup. 
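For orientation, a minimal sketch of how the renamed backup module's IAM helpers above are typically driven — the project, instance, cluster, and backup IDs below are placeholders, not values taken from this change:

    from google.cloud.bigtable import Client
    from google.cloud.bigtable.backup import Backup

    client = Client(project="my-project", admin=True)   # placeholder project ID
    instance = client.instance("my-instance")           # placeholder instance ID
    backup = Backup("my-backup", instance, cluster_id="my-cluster")  # placeholder IDs

    policy = backup.get_iam_policy()   # current google.cloud.bigtable.policy.Policy
    backup.set_iam_policy(policy)      # write the (possibly modified) policy back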
""" table_api = self._instance._client.table_admin_client @@ -452,13 +452,13 @@ def set_iam_policy(self, policy): existing policy. For more information about policy, please see documentation of - class `google.cloud.bigtable.deprecated.policy.Policy` + class `google.cloud.bigtable.policy.Policy` - :type policy: :class:`google.cloud.bigtable.deprecated.policy.Policy` + :type policy: :class:`google.cloud.bigtable.policy.Policy` :param policy: A new IAM policy to replace the current IAM policy of this backup. - :rtype: :class:`google.cloud.bigtable.deprecated.policy.Policy` + :rtype: :class:`google.cloud.bigtable.policy.Policy` :returns: The current IAM policy of this backup. """ table_api = self._instance._client.table_admin_client diff --git a/google/cloud/bigtable/batcher.py b/google/cloud/bigtable/batcher.py new file mode 100644 index 000000000..a6eb806e9 --- /dev/null +++ b/google/cloud/bigtable/batcher.py @@ -0,0 +1,395 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""User friendly container for Google Cloud Bigtable MutationBatcher.""" +import threading +import queue +import concurrent.futures +import atexit + + +from google.api_core.exceptions import from_grpc_status +from dataclasses import dataclass + + +FLUSH_COUNT = 100 # after this many elements, send out the batch + +MAX_MUTATION_SIZE = 20 * 1024 * 1024 # 20MB # after this many bytes, send out the batch + +MAX_OUTSTANDING_BYTES = 100 * 1024 * 1024 # 100MB # max inflight byte size. + +MAX_OUTSTANDING_ELEMENTS = 100000 # max inflight mutations. + + +class MutationsBatchError(Exception): + """Error in the batch request""" + + def __init__(self, message, exc): + self.exc = exc + self.message = message + super().__init__(self.message) + + +class _MutationsBatchQueue(object): + """Private Threadsafe Queue to hold rows for batching.""" + + def __init__(self, max_mutation_bytes=MAX_MUTATION_SIZE, flush_count=FLUSH_COUNT): + """Specify the queue constraints""" + self._queue = queue.Queue() + self.total_mutation_count = 0 + self.total_size = 0 + self.max_mutation_bytes = max_mutation_bytes + self.flush_count = flush_count + + def get(self): + """Retrieve an item from the queue. Recalculate queue size.""" + row = self._queue.get() + mutation_size = row.get_mutations_size() + self.total_mutation_count -= len(row._get_mutations()) + self.total_size -= mutation_size + return row + + def put(self, item): + """Insert an item to the queue. 
Recalculate queue size.""" + + mutation_count = len(item._get_mutations()) + + self._queue.put(item) + + self.total_size += item.get_mutations_size() + self.total_mutation_count += mutation_count + + def full(self): + """Check if the queue is full.""" + if ( + self.total_mutation_count >= self.flush_count + or self.total_size >= self.max_mutation_bytes + ): + return True + return False + + def empty(self): + return self._queue.empty() + + +@dataclass +class _BatchInfo: + """Keeping track of size of a batch""" + + mutations_count: int = 0 + rows_count: int = 0 + mutations_size: int = 0 + + +class _FlowControl(object): + def __init__( + self, + max_mutations=MAX_OUTSTANDING_ELEMENTS, + max_mutation_bytes=MAX_OUTSTANDING_BYTES, + ): + """Control the inflight requests. Keep track of the mutations, row bytes and row counts. + As requests to backend are being made, adjust the number of mutations being processed. + + If threshold is reached, block the flow. + Reopen the flow as requests are finished. + """ + self.max_mutations = max_mutations + self.max_mutation_bytes = max_mutation_bytes + self.inflight_mutations = 0 + self.inflight_size = 0 + self.event = threading.Event() + self.event.set() + + def is_blocked(self): + """Returns True if: + + - inflight mutations >= max_mutations, or + - inflight bytes size >= max_mutation_bytes, or + """ + + return ( + self.inflight_mutations >= self.max_mutations + or self.inflight_size >= self.max_mutation_bytes + ) + + def control_flow(self, batch_info): + """ + Calculate the resources used by this batch + """ + + self.inflight_mutations += batch_info.mutations_count + self.inflight_size += batch_info.mutations_size + self.set_flow_control_status() + + def wait(self): + """ + Wait until flow control pushback has been released. + It awakens as soon as `event` is set. + """ + self.event.wait() + + def set_flow_control_status(self): + """Check the inflight mutations and size. + + If values exceed the allowed threshold, block the event. + """ + if self.is_blocked(): + self.event.clear() # sleep + else: + self.event.set() # awaken the threads + + def release(self, batch_info): + """ + Release the resources. + Decrement the row size to allow enqueued mutations to be run. + """ + self.inflight_mutations -= batch_info.mutations_count + self.inflight_size -= batch_info.mutations_size + self.set_flow_control_status() + + +class MutationsBatcher(object): + """A MutationsBatcher is used in batch cases where the number of mutations + is large or unknown. It will store :class:`DirectRow` in memory until one of the + size limits is reached, or an explicit call to :func:`flush()` is performed. When + a flush event occurs, the :class:`DirectRow` in memory will be sent to Cloud + Bigtable. Batching mutations is more efficient than sending individual + request. + + This class is not suited for usage in systems where each mutation + must be guaranteed to be sent, since calling mutate may only result in an + in-memory change. In a case of a system crash, any :class:`DirectRow` remaining in + memory will not necessarily be sent to the service, even after the + completion of the :func:`mutate()` method. + + Note on thread safety: The same :class:`MutationBatcher` cannot be shared by multiple end-user threads. + + :type table: class + :param table: class:`~google.cloud.bigtable.table.Table`. + + :type flush_count: int + :param flush_count: (Optional) Max number of rows to flush. If it + reaches the max number of rows it calls finish_batch() to mutate the + current row batch. 
Default is FLUSH_COUNT (1000 rows). + + :type max_row_bytes: int + :param max_row_bytes: (Optional) Max number of row mutations size to + flush. If it reaches the max number of row mutations size it calls + finish_batch() to mutate the current row batch. Default is MAX_ROW_BYTES + (5 MB). + + :type flush_interval: float + :param flush_interval: (Optional) The interval (in seconds) between asynchronous flush. + Default is 1 second. + + :type batch_completed_callback: Callable[list:[`~google.rpc.status_pb2.Status`]] = None + :param batch_completed_callback: (Optional) A callable for handling responses + after the current batch is sent. The callable function expect a list of grpc + Status. + """ + + def __init__( + self, + table, + flush_count=FLUSH_COUNT, + max_row_bytes=MAX_MUTATION_SIZE, + flush_interval=1, + batch_completed_callback=None, + ): + self._rows = _MutationsBatchQueue( + max_mutation_bytes=max_row_bytes, flush_count=flush_count + ) + self.table = table + self._executor = concurrent.futures.ThreadPoolExecutor() + atexit.register(self.close) + self._timer = threading.Timer(flush_interval, self.flush) + self._timer.start() + self.flow_control = _FlowControl( + max_mutations=MAX_OUTSTANDING_ELEMENTS, + max_mutation_bytes=MAX_OUTSTANDING_BYTES, + ) + self.futures_mapping = {} + self.exceptions = queue.Queue() + self._user_batch_completed_callback = batch_completed_callback + + @property + def flush_count(self): + return self._rows.flush_count + + @property + def max_row_bytes(self): + return self._rows.max_mutation_bytes + + def __enter__(self): + """Starting the MutationsBatcher as a context manager""" + return self + + def mutate(self, row): + """Add a row to the batch. If the current batch meets one of the size + limits, the batch is sent asynchronously. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_batcher_mutate] + :end-before: [END bigtable_api_batcher_mutate] + :dedent: 4 + + :type row: class + :param row: :class:`~google.cloud.bigtable.row.DirectRow`. + + :raises: One of the following: + * :exc:`~.table._BigtableRetryableError` if any row returned a transient error. + * :exc:`RuntimeError` if the number of responses doesn't match the number of rows that were retried + """ + self._rows.put(row) + + if self._rows.full(): + self._flush_async() + + def mutate_rows(self, rows): + """Add multiple rows to the batch. If the current batch meets one of the size + limits, the batch is sent asynchronously. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_batcher_mutate_rows] + :end-before: [END bigtable_api_batcher_mutate_rows] + :dedent: 4 + + :type rows: list:[`~google.cloud.bigtable.row.DirectRow`] + :param rows: list:[`~google.cloud.bigtable.row.DirectRow`]. + + :raises: One of the following: + * :exc:`~.table._BigtableRetryableError` if any row returned a transient error. + * :exc:`RuntimeError` if the number of responses doesn't match the number of rows that were retried + """ + for row in rows: + self.mutate(row) + + def flush(self): + """Sends the current batch to Cloud Bigtable synchronously. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_batcher_flush] + :end-before: [END bigtable_api_batcher_flush] + :dedent: 4 + + :raises: + * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. 
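To ground the mutate()/flush() surface documented above, a small usage sketch under assumed placeholder IDs (the instance, table, column family, and row key are illustrative only):

    from google.cloud.bigtable import Client
    from google.cloud.bigtable.batcher import MutationsBatcher

    client = Client(admin=True)
    table = client.instance("my-instance").table("my-table")   # placeholder IDs

    batcher = MutationsBatcher(table, flush_count=100)
    row = table.direct_row(b"row-key-1")        # assumes the usual Table.direct_row helper
    row.set_cell("cf1", b"field1", b"value1")
    batcher.mutate(row)   # queued; flushed automatically once a size limit is hit
    batcher.flush()       # or force a synchronous flush
    batcher.close()       # flush remaining rows and shut down the executor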
+ """ + rows_to_flush = [] + while not self._rows.empty(): + rows_to_flush.append(self._rows.get()) + response = self._flush_rows(rows_to_flush) + return response + + def _flush_async(self): + """Sends the current batch to Cloud Bigtable asynchronously. + + :raises: + * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. + """ + + rows_to_flush = [] + mutations_count = 0 + mutations_size = 0 + rows_count = 0 + batch_info = _BatchInfo() + + while not self._rows.empty(): + row = self._rows.get() + mutations_count += len(row._get_mutations()) + mutations_size += row.get_mutations_size() + rows_count += 1 + rows_to_flush.append(row) + batch_info.mutations_count = mutations_count + batch_info.rows_count = rows_count + batch_info.mutations_size = mutations_size + + if ( + rows_count >= self.flush_count + or mutations_size >= self.max_row_bytes + or mutations_count >= self.flow_control.max_mutations + or mutations_size >= self.flow_control.max_mutation_bytes + or self._rows.empty() # submit when it reached the end of the queue + ): + # wait for resources to become available, before submitting any new batch + self.flow_control.wait() + # once unblocked, submit a batch + # event flag will be set by control_flow to block subsequent thread, but not blocking this one + self.flow_control.control_flow(batch_info) + future = self._executor.submit(self._flush_rows, rows_to_flush) + self.futures_mapping[future] = batch_info + future.add_done_callback(self._batch_completed_callback) + + # reset and start a new batch + rows_to_flush = [] + mutations_size = 0 + rows_count = 0 + mutations_count = 0 + batch_info = _BatchInfo() + + def _batch_completed_callback(self, future): + """Callback for when the mutation has finished to clean up the current batch + and release items from the flow controller. + + Raise exceptions if there's any. + Release the resources locked by the flow control and allow enqueued tasks to be run. + """ + + processed_rows = self.futures_mapping[future] + self.flow_control.release(processed_rows) + del self.futures_mapping[future] + + def _flush_rows(self, rows_to_flush): + """Mutate the specified rows. + + :raises: + * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. + """ + responses = [] + if len(rows_to_flush) > 0: + response = self.table.mutate_rows(rows_to_flush) + + if self._user_batch_completed_callback: + self._user_batch_completed_callback(response) + + for result in response: + if result.code != 0: + exc = from_grpc_status(result.code, result.message) + self.exceptions.put(exc) + responses.append(result) + + return responses + + def __exit__(self, exc_type, exc_value, exc_traceback): + """Clean up resources. Flush and shutdown the ThreadPoolExecutor.""" + self.close() + + def close(self): + """Clean up resources. Flush and shutdown the ThreadPoolExecutor. + Any errors will be raised. + + :raises: + * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. 
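Because __exit__ routes through close(), the batcher can also be used as a context manager; a hedged sketch (placeholder IDs again) that also surfaces the MutationsBatchError raised when any per-row mutation failed:

    from google.cloud.bigtable import Client
    from google.cloud.bigtable.batcher import MutationsBatcher, MutationsBatchError

    client = Client(admin=True)
    table = client.instance("my-instance").table("my-table")   # placeholder IDs

    try:
        with MutationsBatcher(table, flush_interval=1) as batcher:
            row = table.direct_row(b"row-key-2")    # assumes Table.direct_row
            row.set_cell("cf1", b"field1", b"value2")
            batcher.mutate(row)
        # leaving the block calls close(), which flushes and shuts down the executor
    except MutationsBatchError as err:
        # err.exc holds the list of per-row gRPC errors collected during flushes
        print("batch mutation errors:", err.exc)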
+ """ + self.flush() + self._executor.shutdown(wait=True) + atexit.unregister(self.close) + if self.exceptions.qsize() > 0: + exc = list(self.exceptions.queue) + raise MutationsBatchError("Errors in batch mutations.", exc=exc) diff --git a/google/cloud/bigtable/client.py b/google/cloud/bigtable/client.py index 4ec3cea27..c82a268c6 100644 --- a/google/cloud/bigtable/client.py +++ b/google/cloud/bigtable/client.py @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,1081 +11,503 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -from __future__ import annotations +"""Parent client for calling the Google Cloud Bigtable API. -from typing import ( - cast, - Any, - Optional, - Set, - TYPE_CHECKING, -) +This is the base from which all interactions with the API occur. + +In the hierarchy of API concepts -import asyncio -import grpc -import time +* a :class:`~google.cloud.bigtable.client.Client` owns an + :class:`~google.cloud.bigtable.instance.Instance` +* an :class:`~google.cloud.bigtable.instance.Instance` owns a + :class:`~google.cloud.bigtable.table.Table` +* a :class:`~google.cloud.bigtable.table.Table` owns a + :class:`~.column_family.ColumnFamily` +* a :class:`~google.cloud.bigtable.table.Table` owns a + :class:`~google.cloud.bigtable.row.Row` (and all the cells in the row) +""" +import os import warnings -import sys -import random +import grpc # type: ignore -from collections import namedtuple +from google.api_core.gapic_v1 import client_info as client_info_lib +import google.auth # type: ignore +from google.auth.credentials import AnonymousCredentials # type: ignore -from google.cloud.bigtable_v2.services.bigtable.client import BigtableClientMeta -from google.cloud.bigtable_v2.services.bigtable.async_client import BigtableAsyncClient -from google.cloud.bigtable_v2.services.bigtable.async_client import DEFAULT_CLIENT_INFO -from google.cloud.bigtable_v2.services.bigtable.transports.pooled_grpc_asyncio import ( - PooledBigtableGrpcAsyncIOTransport, +from google.cloud import bigtable_v2 +from google.cloud import bigtable_admin_v2 +from google.cloud.bigtable_v2.services.bigtable.transports import BigtableGrpcTransport +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports import ( + BigtableInstanceAdminGrpcTransport, ) -from google.cloud.bigtable_v2.types.bigtable import PingAndWarmRequest -from google.cloud.client import ClientWithProject -from google.api_core.exceptions import GoogleAPICallError -from google.api_core import retry_async as retries -from google.api_core import exceptions as core_exceptions -from google.cloud.bigtable._read_rows import _ReadRowsOperation - -import google.auth.credentials -import google.auth._default -from google.api_core import client_options as client_options_lib -from google.cloud.bigtable.row import Row -from google.cloud.bigtable.read_rows_query import ReadRowsQuery -from google.cloud.bigtable.iterators import ReadRowsIterator -from google.cloud.bigtable.exceptions import FailedQueryShardError -from google.cloud.bigtable.exceptions import ShardedReadRowsExceptionGroup - -from google.cloud.bigtable.mutations import Mutation, RowMutationEntry -from google.cloud.bigtable._mutate_rows import _MutateRowsOperation -from 
google.cloud.bigtable._helpers import _make_metadata -from google.cloud.bigtable._helpers import _convert_retry_deadline -from google.cloud.bigtable.mutations_batcher import MutationsBatcher -from google.cloud.bigtable.mutations_batcher import _MB_SIZE -from google.cloud.bigtable._helpers import _attempt_timeout_generator - -from google.cloud.bigtable.read_modify_write_rules import ReadModifyWriteRule -from google.cloud.bigtable.row_filters import RowFilter -from google.cloud.bigtable.row_filters import StripValueTransformerFilter -from google.cloud.bigtable.row_filters import CellsRowLimitFilter -from google.cloud.bigtable.row_filters import RowFilterChain - -if TYPE_CHECKING: - from google.cloud.bigtable import RowKeySamples - from google.cloud.bigtable import ShardedQuery - -# used by read_rows_sharded to limit how many requests are attempted in parallel -CONCURRENCY_LIMIT = 10 - -# used to register instance data with the client for channel warming -_WarmedInstanceKey = namedtuple( - "_WarmedInstanceKey", ["instance_name", "table_name", "app_profile_id"] +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports import ( + BigtableTableAdminGrpcTransport, ) +from google.cloud import bigtable +from google.cloud.bigtable.instance import Instance +from google.cloud.bigtable.cluster import Cluster + +from google.cloud.client import ClientWithProject # type: ignore + +from google.cloud.bigtable_admin_v2.types import instance +from google.cloud.bigtable.cluster import _CLUSTER_NAME_RE +from google.cloud.environment_vars import BIGTABLE_EMULATOR # type: ignore + + +INSTANCE_TYPE_PRODUCTION = instance.Instance.Type.PRODUCTION +INSTANCE_TYPE_DEVELOPMENT = instance.Instance.Type.DEVELOPMENT +INSTANCE_TYPE_UNSPECIFIED = instance.Instance.Type.TYPE_UNSPECIFIED +SPANNER_ADMIN_SCOPE = "https://www.googleapis.com/auth/spanner.admin" +ADMIN_SCOPE = "https://www.googleapis.com/auth/bigtable.admin" +"""Scope for interacting with the Cluster Admin and Table Admin APIs.""" +DATA_SCOPE = "https://www.googleapis.com/auth/bigtable.data" +"""Scope for reading and writing table data.""" +READ_ONLY_SCOPE = "https://www.googleapis.com/auth/bigtable.data.readonly" +"""Scope for reading table data.""" + +_DEFAULT_BIGTABLE_EMULATOR_CLIENT = "google-cloud-bigtable-emulator" +_GRPC_CHANNEL_OPTIONS = ( + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ("grpc.keepalive_time_ms", 30000), + ("grpc.keepalive_timeout_ms", 10000), +) -class BigtableDataClient(ClientWithProject): - def __init__( - self, - *, - project: str | None = None, - pool_size: int = 3, - credentials: google.auth.credentials.Credentials | None = None, - client_options: dict[str, Any] - | "google.api_core.client_options.ClientOptions" - | None = None, - ): - """ - Create a client instance for the Bigtable Data API - - Client should be created within an async context (running event loop) - - Args: - project: the project which the client acts on behalf of. - If not passed, falls back to the default inferred - from the environment. - pool_size: The number of grpc channels to maintain - in the internal channel pool. - credentials: - Thehe OAuth2 Credentials to use for this - client. If not passed (and if no ``_http`` object is - passed), falls back to the default inferred from the - environment. - client_options (Optional[Union[dict, google.api_core.client_options.ClientOptions]]): - Client options used to set user options - on the client. API Endpoint should be set through client_options. 
- Raises: - - RuntimeError if called outside of an async context (no running event loop) - - ValueError if pool_size is less than 1 - """ - # set up transport in registry - transport_str = f"pooled_grpc_asyncio_{pool_size}" - transport = PooledBigtableGrpcAsyncIOTransport.with_fixed_size(pool_size) - BigtableClientMeta._transport_registry[transport_str] = transport - # set up client info headers for veneer library - client_info = DEFAULT_CLIENT_INFO - client_info.client_library_version = client_info.gapic_version - # parse client options - if type(client_options) is dict: - client_options = client_options_lib.from_dict(client_options) - client_options = cast( - Optional[client_options_lib.ClientOptions], client_options - ) - # initialize client - ClientWithProject.__init__( - self, - credentials=credentials, - project=project, - client_options=client_options, - ) - self._gapic_client = BigtableAsyncClient( - transport=transport_str, - credentials=credentials, + +def _create_gapic_client(client_class, client_options=None, transport=None): + def inner(self): + return client_class( + credentials=None, + client_info=self._client_info, client_options=client_options, - client_info=client_info, - ) - self.transport = cast( - PooledBigtableGrpcAsyncIOTransport, self._gapic_client.transport + transport=transport, ) - # keep track of active instances to for warmup on channel refresh - self._active_instances: Set[_WarmedInstanceKey] = set() - # keep track of table objects associated with each instance - # only remove instance from _active_instances when all associated tables remove it - self._instance_owners: dict[_WarmedInstanceKey, Set[int]] = {} - # attempt to start background tasks - self._channel_init_time = time.monotonic() - self._channel_refresh_tasks: list[asyncio.Task[None]] = [] - try: - self.start_background_channel_refresh() - except RuntimeError: - warnings.warn( - f"{self.__class__.__name__} should be started in an " - "asyncio event loop. 
Channel refresh will not be started", - RuntimeWarning, - stacklevel=2, - ) - def start_background_channel_refresh(self) -> None: - """ - Starts a background task to ping and warm each channel in the pool - Raises: - - RuntimeError if not called in an asyncio event loop - """ - if not self._channel_refresh_tasks: - # raise RuntimeError if there is no event loop - asyncio.get_running_loop() - for channel_idx in range(self.transport.pool_size): - refresh_task = asyncio.create_task(self._manage_channel(channel_idx)) - if sys.version_info >= (3, 8): - # task names supported in Python 3.8+ - refresh_task.set_name( - f"{self.__class__.__name__} channel refresh {channel_idx}" - ) - self._channel_refresh_tasks.append(refresh_task) - - async def close(self, timeout: float = 2.0): - """ - Cancel all background tasks - """ - for task in self._channel_refresh_tasks: - task.cancel() - group = asyncio.gather(*self._channel_refresh_tasks, return_exceptions=True) - await asyncio.wait_for(group, timeout=timeout) - await self.transport.close() - self._channel_refresh_tasks = [] - - async def _ping_and_warm_instances( - self, channel: grpc.aio.Channel, instance_key: _WarmedInstanceKey | None = None - ) -> list[GoogleAPICallError | None]: - """ - Prepares the backend for requests on a channel + return inner - Pings each Bigtable instance registered in `_active_instances` on the client - Args: - - channel: grpc channel to warm - - instance_key: if provided, only warm the instance associated with the key - Returns: - - sequence of results or exceptions from the ping requests - """ - instance_list = ( - [instance_key] if instance_key is not None else self._active_instances - ) - ping_rpc = channel.unary_unary( - "/google.bigtable.v2.Bigtable/PingAndWarm", - request_serializer=PingAndWarmRequest.serialize, - ) - # prepare list of coroutines to run - tasks = [ - ping_rpc( - request={"name": instance_name, "app_profile_id": app_profile_id}, - metadata=[ - ( - "x-goog-request-params", - f"name={instance_name}&app_profile_id={app_profile_id}", - ) - ], - wait_for_ready=True, - ) - for (instance_name, table_name, app_profile_id) in instance_list - ] - # execute coroutines in parallel - result_list = await asyncio.gather(*tasks, return_exceptions=True) - # return None in place of empty successful responses - return [r or None for r in result_list] - - async def _manage_channel( - self, - channel_idx: int, - refresh_interval_min: float = 60 * 35, - refresh_interval_max: float = 60 * 45, - grace_period: float = 60 * 10, - ) -> None: - """ - Background coroutine that periodically refreshes and warms a grpc channel - - The backend will automatically close channels after 60 minutes, so - `refresh_interval` + `grace_period` should be < 60 minutes - - Runs continuously until the client is closed - - Args: - channel_idx: index of the channel in the transport's channel pool - refresh_interval_min: minimum interval before initiating refresh - process in seconds. Actual interval will be a random value - between `refresh_interval_min` and `refresh_interval_max` - refresh_interval_max: maximum interval before initiating refresh - process in seconds. 
Actual interval will be a random value - between `refresh_interval_min` and `refresh_interval_max` - grace_period: time to allow previous channel to serve existing - requests before closing, in seconds - """ - first_refresh = self._channel_init_time + random.uniform( - refresh_interval_min, refresh_interval_max - ) - next_sleep = max(first_refresh - time.monotonic(), 0) - if next_sleep > 0: - # warm the current channel immediately - channel = self.transport.channels[channel_idx] - await self._ping_and_warm_instances(channel) - # continuously refresh the channel every `refresh_interval` seconds - while True: - await asyncio.sleep(next_sleep) - # prepare new channel for use - new_channel = self.transport.grpc_channel._create_channel() - await self._ping_and_warm_instances(new_channel) - # cycle channel out of use, with long grace window before closure - start_timestamp = time.time() - await self.transport.replace_channel( - channel_idx, grace=grace_period, swap_sleep=10, new_channel=new_channel - ) - # subtract the time spent waiting for the channel to be replaced - next_refresh = random.uniform(refresh_interval_min, refresh_interval_max) - next_sleep = next_refresh - (time.time() - start_timestamp) +class Client(ClientWithProject): + """Client for interacting with Google Cloud Bigtable API. - async def _register_instance(self, instance_id: str, owner: Table) -> None: - """ - Registers an instance with the client, and warms the channel pool - for the instance - The client will periodically refresh grpc channel pool used to make - requests, and new channels will be warmed for each registered instance - Channels will not be refreshed unless at least one instance is registered - - Args: - - instance_id: id of the instance to register. - - owner: table that owns the instance. Owners will be tracked in - _instance_owners, and instances will only be unregistered when all - owners call _remove_instance_registration - """ - instance_name = self._gapic_client.instance_path(self.project, instance_id) - instance_key = _WarmedInstanceKey( - instance_name, owner.table_name, owner.app_profile_id - ) - self._instance_owners.setdefault(instance_key, set()).add(id(owner)) - if instance_name not in self._active_instances: - self._active_instances.add(instance_key) - if self._channel_refresh_tasks: - # refresh tasks already running - # call ping and warm on all existing channels - for channel in self.transport.channels: - await self._ping_and_warm_instances(channel, instance_key) - else: - # refresh tasks aren't active. start them as background tasks - self.start_background_channel_refresh() - - async def _remove_instance_registration( - self, instance_id: str, owner: Table - ) -> bool: - """ - Removes an instance from the client's registered instances, to prevent - warming new channels for the instance + .. note:: - If instance_id is not registered, or is still in use by other tables, returns False + Since the Cloud Bigtable API requires the gRPC transport, no + ``_http`` argument is accepted by this class. - Args: - - instance_id: id of the instance to remove - - owner: table that owns the instance. 
Owners will be tracked in - _instance_owners, and instances will only be unregistered when all - owners call _remove_instance_registration - Returns: - - True if instance was removed - """ - instance_name = self._gapic_client.instance_path(self.project, instance_id) - instance_key = _WarmedInstanceKey( - instance_name, owner.table_name, owner.app_profile_id - ) - owner_list = self._instance_owners.get(instance_key, set()) - try: - owner_list.remove(id(owner)) - if len(owner_list) == 0: - self._active_instances.remove(instance_key) - return True - except KeyError: - return False - - # TODO: revisit timeouts https://github.com/googleapis/python-bigtable/issues/782 - def get_table( - self, - instance_id: str, - table_id: str, - app_profile_id: str | None = None, - default_operation_timeout: float = 600, - default_per_request_timeout: float | None = None, - ) -> Table: - """ - Returns a table instance for making data API requests - - Args: - instance_id: The Bigtable instance ID to associate with this client. - instance_id is combined with the client's project to fully - specify the instance - table_id: The ID of the table. - app_profile_id: (Optional) The app profile to associate with requests. - https://cloud.google.com/bigtable/docs/app-profiles - """ - return Table( - self, - instance_id, - table_id, - app_profile_id, - default_operation_timeout=default_operation_timeout, - default_per_request_timeout=default_per_request_timeout, - ) + :type project: :class:`str` or :func:`unicode ` + :param project: (Optional) The ID of the project which owns the + instances, tables and data. If not provided, will + attempt to determine from the environment. - async def __aenter__(self): - self.start_background_channel_refresh() - return self + :type credentials: :class:`~google.auth.credentials.Credentials` + :param credentials: (Optional) The OAuth2 Credentials to use for this + client. If not passed, falls back to the default + inferred from the environment. - async def __aexit__(self, exc_type, exc_val, exc_tb): - await self.close() - await self._gapic_client.__aexit__(exc_type, exc_val, exc_tb) + :type read_only: bool + :param read_only: (Optional) Boolean indicating if the data scope should be + for reading only (or for writing as well). Defaults to + :data:`False`. + :type admin: bool + :param admin: (Optional) Boolean indicating if the client will be used to + interact with the Instance Admin or Table Admin APIs. This + requires the :const:`ADMIN_SCOPE`. Defaults to :data:`False`. -class Table: - """ - Main Data API surface + :type: client_info: :class:`google.api_core.gapic_v1.client_info.ClientInfo` + :param client_info: + The client info used to send a user-agent string along with API + requests. If ``None``, then default info will be used. Generally, + you only need to set this if you're developing your own library + or partner tool. + + :type client_options: :class:`~google.api_core.client_options.ClientOptions` + or :class:`dict` + :param client_options: (Optional) Client options used to set user options + on the client. API Endpoint should be set through client_options. + + :type admin_client_options: + :class:`~google.api_core.client_options.ClientOptions` or :class:`dict` + :param admin_client_options: (Optional) Client options used to set user + options on the client. API Endpoint for admin operations should be set + through admin_client_options. 
- Table object maintains table_id, and app_profile_id context, and passes them with - each call + :type channel: :instance: grpc.Channel + :param channel (grpc.Channel): (Optional) DEPRECATED: + A ``Channel`` instance through which to make calls. + This argument is mutually exclusive with ``credentials``; + providing both will raise an exception. No longer used. + + :raises: :class:`ValueError ` if both ``read_only`` + and ``admin`` are :data:`True` """ + _table_data_client = None + _table_admin_client = None + _instance_admin_client = None + def __init__( self, - client: BigtableDataClient, - instance_id: str, - table_id: str, - app_profile_id: str | None = None, - *, - default_operation_timeout: float = 600, - default_per_request_timeout: float | None = None, + project=None, + credentials=None, + read_only=False, + admin=False, + client_info=None, + client_options=None, + admin_client_options=None, + channel=None, ): - """ - Initialize a Table instance - - Must be created within an async context (running event loop) - - Args: - instance_id: The Bigtable instance ID to associate with this client. - instance_id is combined with the client's project to fully - specify the instance - table_id: The ID of the table. table_id is combined with the - instance_id and the client's project to fully specify the table - app_profile_id: (Optional) The app profile to associate with requests. - https://cloud.google.com/bigtable/docs/app-profiles - default_operation_timeout: (Optional) The default timeout, in seconds - default_per_request_timeout: (Optional) The default timeout for individual - rpc requests, in seconds - Raises: - - RuntimeError if called outside of an async context (no running event loop) - """ - # validate timeouts - if default_operation_timeout <= 0: - raise ValueError("default_operation_timeout must be greater than 0") - if default_per_request_timeout is not None and default_per_request_timeout <= 0: - raise ValueError("default_per_request_timeout must be greater than 0") - if ( - default_per_request_timeout is not None - and default_per_request_timeout > default_operation_timeout - ): + if client_info is None: + client_info = client_info_lib.ClientInfo( + client_library_version=bigtable.__version__, + ) + if read_only and admin: raise ValueError( - "default_per_request_timeout must be less than default_operation_timeout" + "A read-only client cannot also perform" "administrative actions." ) - self.client = client - self.instance_id = instance_id - self.instance_name = self.client._gapic_client.instance_path( - self.client.project, instance_id - ) - self.table_id = table_id - self.table_name = self.client._gapic_client.table_path( - self.client.project, instance_id, table_id - ) - self.app_profile_id = app_profile_id - self.default_operation_timeout = default_operation_timeout - self.default_per_request_timeout = default_per_request_timeout + # NOTE: We set the scopes **before** calling the parent constructor. + # It **may** use those scopes in ``with_scopes_if_required``. + self._read_only = bool(read_only) + self._admin = bool(admin) + self._client_info = client_info + self._emulator_host = os.getenv(BIGTABLE_EMULATOR) - # raises RuntimeError if called outside of an async context (no running event loop) - try: - self._register_instance_task = asyncio.create_task( - self.client._register_instance(instance_id, self) - ) - except RuntimeError as e: - raise RuntimeError( - f"{self.__class__.__name__} must be created within an async event loop context." 
- ) from e + if self._emulator_host is not None: + if credentials is None: + credentials = AnonymousCredentials() + if project is None: + project = _DEFAULT_BIGTABLE_EMULATOR_CLIENT - async def read_rows_stream( - self, - query: ReadRowsQuery | dict[str, Any], - *, - operation_timeout: float | None = None, - per_request_timeout: float | None = None, - ) -> ReadRowsIterator: - """ - Returns an iterator to asynchronously stream back row data. + if channel is not None: + warnings.warn( + "'channel' is deprecated and no longer used.", + DeprecationWarning, + stacklevel=2, + ) - Failed requests within operation_timeout and operation_deadline policies will be retried. + self._client_options = client_options + self._admin_client_options = admin_client_options + self._channel = channel + self.SCOPE = self._get_scopes() + super(Client, self).__init__( + project=project, + credentials=credentials, + client_options=client_options, + ) - Args: - - query: contains details about which rows to return - - operation_timeout: the time budget for the entire operation, in seconds. - Failed requests will be retried within the budget. - time is only counted while actively waiting on the network. - If None, defaults to the Table's default_operation_timeout - - per_request_timeout: the time budget for an individual network request, in seconds. - If it takes longer than this time to complete, the request will be cancelled with - a DeadlineExceeded exception, and a retry will be attempted. - If None, defaults to the Table's default_per_request_timeout + def _get_scopes(self): + """Get the scopes corresponding to admin / read-only state. Returns: - - an asynchronous iterator that yields rows returned by the query - Raises: - - DeadlineExceeded: raised after operation timeout - will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions - from any retries that failed - - GoogleAPIError: raised if the request encounters an unrecoverable error - - IdleTimeout: if iterator was abandoned + Tuple[str, ...]: The tuple of scopes. """ + if self._read_only: + scopes = (READ_ONLY_SCOPE,) + else: + scopes = (DATA_SCOPE,) - operation_timeout = operation_timeout or self.default_operation_timeout - per_request_timeout = per_request_timeout or self.default_per_request_timeout + if self._admin: + scopes += (ADMIN_SCOPE,) - if operation_timeout <= 0: - raise ValueError("operation_timeout must be greater than 0") - if per_request_timeout is not None and per_request_timeout <= 0: - raise ValueError("per_request_timeout must be greater than 0") - if per_request_timeout is not None and per_request_timeout > operation_timeout: - raise ValueError( - "per_request_timeout must not be greater than operation_timeout" - ) - if per_request_timeout is None: - per_request_timeout = operation_timeout - request = query._to_dict() if isinstance(query, ReadRowsQuery) else query - request["table_name"] = self.table_name - if self.app_profile_id: - request["app_profile_id"] = self.app_profile_id - - # read_rows smart retries is implemented using a series of iterators: - # - client.read_rows: outputs raw ReadRowsResponse objects from backend. 
Has per_request_timeout - # - ReadRowsOperation.merge_row_response_stream: parses chunks into rows - # - ReadRowsOperation.retryable_merge_rows: adds retries, caching, revised requests, per_request_timeout - # - ReadRowsIterator: adds idle_timeout, moves stats out of stream and into attribute - row_merger = _ReadRowsOperation( - request, - self.client._gapic_client, - operation_timeout=operation_timeout, - per_request_timeout=per_request_timeout, - ) - output_generator = ReadRowsIterator(row_merger) - # add idle timeout to clear resources if generator is abandoned - idle_timeout_seconds = 300 - await output_generator._start_idle_timer(idle_timeout_seconds) - return output_generator + return scopes - async def read_rows( - self, - query: ReadRowsQuery | dict[str, Any], - *, - operation_timeout: float | None = None, - per_request_timeout: float | None = None, - ) -> list[Row]: - """ - Helper function that returns a full list instead of a generator + def _emulator_channel(self, transport, options): + """Create a channel using self._credentials - See read_rows_stream + Works in a similar way to ``grpc.secure_channel`` but using + ``grpc.local_channel_credentials`` rather than + ``grpc.ssh_channel_credentials`` to allow easy connection to a + local emulator. Returns: - - a list of the rows returned by the query - """ - row_generator = await self.read_rows_stream( - query, - operation_timeout=operation_timeout, - per_request_timeout=per_request_timeout, - ) - results = [row async for row in row_generator] - return results - - async def read_row( - self, - row_key: str | bytes, - *, - row_filter: RowFilter | None = None, - operation_timeout: int | float | None = 60, - per_request_timeout: int | float | None = None, - ) -> Row | None: - """ - Helper function to return a single row + grpc.Channel or grpc.aio.Channel + """ + # TODO: Implement a special credentials type for emulator and use + # "transport.create_channel" to create gRPC channels once google-auth + # extends it's allowed credentials types. + # Note: this code also exists in the firestore client. + if "GrpcAsyncIOTransport" in str(transport.__name__): + return grpc.aio.secure_channel( + self._emulator_host, + self._local_composite_credentials(), + options=options, + ) + else: + return grpc.secure_channel( + self._emulator_host, + self._local_composite_credentials(), + options=options, + ) - See read_rows_stream + def _local_composite_credentials(self): + """Create credentials for the local emulator channel. - Raises: - - google.cloud.bigtable.exceptions.RowNotFound: if the row does not exist - Returns: - - the individual row requested, or None if it does not exist + :return: grpc.ChannelCredentials """ - if row_key is None: - raise ValueError("row_key must be string or bytes") - query = ReadRowsQuery(row_keys=row_key, row_filter=row_filter, limit=1) - results = await self.read_rows( - query, - operation_timeout=operation_timeout, - per_request_timeout=per_request_timeout, + credentials = google.auth.credentials.with_scopes_if_required( + self._credentials, None ) - if len(results) == 0: - return None - return results[0] + request = google.auth.transport.requests.Request() - async def read_rows_sharded( - self, - sharded_query: ShardedQuery, - *, - operation_timeout: int | float | None = None, - per_request_timeout: int | float | None = None, - ) -> list[Row]: - """ - Runs a sharded query in parallel, then return the results in a single list. - Results will be returned in the order of the input queries. 
- - This function is intended to be run on the results on a query.shard() call: - - ``` - table_shard_keys = await table.sample_row_keys() - query = ReadRowsQuery(...) - shard_queries = query.shard(table_shard_keys) - results = await table.read_rows_sharded(shard_queries) - ``` - - Args: - - sharded_query: a sharded query to execute - Raises: - - ShardedReadRowsExceptionGroup: if any of the queries failed - - ValueError: if the query_list is empty - """ - if not sharded_query: - raise ValueError("empty sharded_query") - # reduce operation_timeout between batches - operation_timeout = operation_timeout or self.default_operation_timeout - per_request_timeout = ( - per_request_timeout or self.default_per_request_timeout or operation_timeout + # Create the metadata plugin for inserting the authorization header. + metadata_plugin = google.auth.transport.grpc.AuthMetadataPlugin( + credentials, request ) - timeout_generator = _attempt_timeout_generator( - operation_timeout, operation_timeout + + # Create a set of grpc.CallCredentials using the metadata plugin. + google_auth_credentials = grpc.metadata_call_credentials(metadata_plugin) + + # Using the local_credentials to allow connection to emulator + local_credentials = grpc.local_channel_credentials() + + # Combine the local credentials and the authorization credentials. + return grpc.composite_channel_credentials( + local_credentials, google_auth_credentials ) - # submit shards in batches if the number of shards goes over CONCURRENCY_LIMIT - batched_queries = [ - sharded_query[i : i + CONCURRENCY_LIMIT] - for i in range(0, len(sharded_query), CONCURRENCY_LIMIT) - ] - # run batches and collect results - results_list = [] - error_dict = {} - shard_idx = 0 - for batch in batched_queries: - batch_operation_timeout = next(timeout_generator) - routine_list = [ - self.read_rows( - query, - operation_timeout=batch_operation_timeout, - per_request_timeout=min( - per_request_timeout, batch_operation_timeout - ), - ) - for query in batch - ] - batch_result = await asyncio.gather(*routine_list, return_exceptions=True) - for result in batch_result: - if isinstance(result, Exception): - error_dict[shard_idx] = result - else: - results_list.extend(result) - shard_idx += 1 - if error_dict: - # if any sub-request failed, raise an exception instead of returning results - raise ShardedReadRowsExceptionGroup( - [ - FailedQueryShardError(idx, sharded_query[idx], e) - for idx, e in error_dict.items() - ], - results_list, - len(sharded_query), + + def _create_gapic_client_channel(self, client_class, grpc_transport): + if self._emulator_host is not None: + api_endpoint = self._emulator_host + elif self._client_options and self._client_options.api_endpoint: + api_endpoint = self._client_options.api_endpoint + else: + api_endpoint = client_class.DEFAULT_ENDPOINT + + if self._emulator_host is not None: + channel = self._emulator_channel( + transport=grpc_transport, + options=_GRPC_CHANNEL_OPTIONS, ) - return results_list + else: + channel = grpc_transport.create_channel( + host=api_endpoint, + credentials=self._credentials, + options=_GRPC_CHANNEL_OPTIONS, + ) + return grpc_transport(channel=channel, host=api_endpoint) - async def row_exists( - self, - row_key: str | bytes, - *, - operation_timeout: int | float | None = 60, - per_request_timeout: int | float | None = None, - ) -> bool: - """ - Helper function to determine if a row exists + @property + def project_path(self): + """Project name to be used with Instance Admin API. 
- uses the filters: chain(limit cells per row = 1, strip value) + .. note:: - Returns: - - a bool indicating whether the row exists - """ - if row_key is None: - raise ValueError("row_key must be string or bytes") - strip_filter = StripValueTransformerFilter(flag=True) - limit_filter = CellsRowLimitFilter(1) - chain_filter = RowFilterChain(filters=[limit_filter, strip_filter]) - query = ReadRowsQuery(row_keys=row_key, limit=1, row_filter=chain_filter) - results = await self.read_rows( - query, - operation_timeout=operation_timeout, - per_request_timeout=per_request_timeout, - ) - return len(results) > 0 + This property will not change if ``project`` does not, but the + return value is not cached. - async def sample_row_keys( - self, - *, - operation_timeout: float | None = None, - per_request_timeout: float | None = None, - ) -> RowKeySamples: + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_api_project_path] + :end-before: [END bigtable_api_project_path] + :dedent: 4 + + The project name is of the form + + ``"projects/{project}"`` + + :rtype: str + :returns: Return a fully-qualified project string. """ - Return a set of RowKeySamples that delimit contiguous sections of the table of - approximately equal size + return self.instance_admin_client.common_project_path(self.project) - RowKeySamples output can be used with ReadRowsQuery.shard() to create a sharded query that - can be parallelized across multiple backend nodes read_rows and read_rows_stream - requests will call sample_row_keys internally for this purpose when sharding is enabled + @property + def table_data_client(self): + """Getter for the gRPC stub used for the Table Admin API. - RowKeySamples is simply a type alias for list[tuple[bytes, int]]; a list of - row_keys, along with offset positions in the table + For example: - Returns: - - a set of RowKeySamples the delimit contiguous sections of the table - Raises: - - GoogleAPICallError: if the sample_row_keys request fails + .. literalinclude:: snippets.py + :start-after: [START bigtable_api_table_data_client] + :end-before: [END bigtable_api_table_data_client] + :dedent: 4 + + :rtype: :class:`.bigtable_v2.BigtableClient` + :returns: A BigtableClient object. 
""" - # prepare timeouts - operation_timeout = operation_timeout or self.default_operation_timeout - per_request_timeout = per_request_timeout or self.default_per_request_timeout - - if operation_timeout <= 0: - raise ValueError("operation_timeout must be greater than 0") - if per_request_timeout is not None and per_request_timeout <= 0: - raise ValueError("per_request_timeout must be greater than 0") - if per_request_timeout is not None and per_request_timeout > operation_timeout: - raise ValueError( - "per_request_timeout must not be greater than operation_timeout" + if self._table_data_client is None: + transport = self._create_gapic_client_channel( + bigtable_v2.BigtableClient, + BigtableGrpcTransport, ) - attempt_timeout_gen = _attempt_timeout_generator( - per_request_timeout, operation_timeout - ) - # prepare retryable - predicate = retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ) - transient_errors = [] - - def on_error_fn(exc): - # add errors to list if retryable - if predicate(exc): - transient_errors.append(exc) - - retry = retries.AsyncRetry( - predicate=predicate, - timeout=operation_timeout, - initial=0.01, - multiplier=2, - maximum=60, - on_error=on_error_fn, - is_stream=False, - ) + klass = _create_gapic_client( + bigtable_v2.BigtableClient, + client_options=self._client_options, + transport=transport, + ) + self._table_data_client = klass(self) + return self._table_data_client - # prepare request - metadata = _make_metadata(self.table_name, self.app_profile_id) + @property + def table_admin_client(self): + """Getter for the gRPC stub used for the Table Admin API. - async def execute_rpc(): - results = await self.client._gapic_client.sample_row_keys( - table_name=self.table_name, - app_profile_id=self.app_profile_id, - timeout=next(attempt_timeout_gen), - metadata=metadata, - ) - return [(s.row_key, s.offset_bytes) async for s in results] + For example: - wrapped_fn = _convert_retry_deadline( - retry(execute_rpc), operation_timeout, transient_errors - ) - return await wrapped_fn() + .. literalinclude:: snippets.py + :start-after: [START bigtable_api_table_admin_client] + :end-before: [END bigtable_api_table_admin_client] + :dedent: 4 - def mutations_batcher( - self, - *, - flush_interval: float | None = 5, - flush_limit_mutation_count: int | None = 1000, - flush_limit_bytes: int = 20 * _MB_SIZE, - flow_control_max_mutation_count: int = 100_000, - flow_control_max_bytes: int = 100 * _MB_SIZE, - batch_operation_timeout: float | None = None, - batch_per_request_timeout: float | None = None, - ) -> MutationsBatcher: - """ - Returns a new mutations batcher instance. - - Can be used to iteratively add mutations that are flushed as a group, - to avoid excess network calls - - Args: - - flush_interval: Automatically flush every flush_interval seconds. If None, - a table default will be used - - flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count - mutations are added across all entries. If None, this limit is ignored. - - flush_limit_bytes: Flush immediately after flush_limit_bytes bytes are added. - - flow_control_max_mutation_count: Maximum number of inflight mutations. - - flow_control_max_bytes: Maximum number of inflight bytes. - - batch_operation_timeout: timeout for each mutate_rows operation, in seconds. If None, - table default_operation_timeout will be used - - batch_per_request_timeout: timeout for each individual request, in seconds. 
If None, - table default_per_request_timeout will be used - Returns: - - a MutationsBatcher context manager that can batch requests + :rtype: :class:`.bigtable_admin_pb2.BigtableTableAdmin` + :returns: A BigtableTableAdmin instance. + :raises: :class:`ValueError ` if the current + client is not an admin client or if it has not been + :meth:`start`-ed. """ - return MutationsBatcher( - self, - flush_interval=flush_interval, - flush_limit_mutation_count=flush_limit_mutation_count, - flush_limit_bytes=flush_limit_bytes, - flow_control_max_mutation_count=flow_control_max_mutation_count, - flow_control_max_bytes=flow_control_max_bytes, - batch_operation_timeout=batch_operation_timeout, - batch_per_request_timeout=batch_per_request_timeout, - ) + if self._table_admin_client is None: + if not self._admin: + raise ValueError("Client is not an admin client.") - async def mutate_row( - self, - row_key: str | bytes, - mutations: list[Mutation] | Mutation, - *, - operation_timeout: float | None = 60, - per_request_timeout: float | None = None, - ): - """ - Mutates a row atomically. - - Cells already present in the row are left unchanged unless explicitly changed - by ``mutation``. - - Idempotent operations (i.e, all mutations have an explicit timestamp) will be - retried on server failure. Non-idempotent operations will not. - - Args: - - row_key: the row to apply mutations to - - mutations: the set of mutations to apply to the row - - operation_timeout: the time budget for the entire operation, in seconds. - Failed requests will be retried within the budget. - time is only counted while actively waiting on the network. - DeadlineExceeded exception raised after timeout - - per_request_timeout: the time budget for an individual network request, - in seconds. If it takes longer than this time to complete, the request - will be cancelled with a DeadlineExceeded exception, and a retry will be - attempted if within operation_timeout budget - - Raises: - - DeadlineExceeded: raised after operation timeout - will be chained with a RetryExceptionGroup containing all - GoogleAPIError exceptions from any retries that failed - - GoogleAPIError: raised on non-idempotent operations that cannot be - safely retried. 
- """ - operation_timeout = operation_timeout or self.default_operation_timeout - per_request_timeout = per_request_timeout or self.default_per_request_timeout - - if operation_timeout <= 0: - raise ValueError("operation_timeout must be greater than 0") - if per_request_timeout is not None and per_request_timeout <= 0: - raise ValueError("per_request_timeout must be greater than 0") - if per_request_timeout is not None and per_request_timeout > operation_timeout: - raise ValueError("per_request_timeout must be less than operation_timeout") - - if isinstance(row_key, str): - row_key = row_key.encode("utf-8") - request = {"table_name": self.table_name, "row_key": row_key} - if self.app_profile_id: - request["app_profile_id"] = self.app_profile_id - - if isinstance(mutations, Mutation): - mutations = [mutations] - request["mutations"] = [mutation._to_dict() for mutation in mutations] - - if all(mutation.is_idempotent() for mutation in mutations): - # mutations are all idempotent and safe to retry - predicate = retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, + transport = self._create_gapic_client_channel( + bigtable_admin_v2.BigtableTableAdminClient, + BigtableTableAdminGrpcTransport, ) - else: - # mutations should not be retried - predicate = retries.if_exception_type() - - transient_errors = [] - - def on_error_fn(exc): - if predicate(exc): - transient_errors.append(exc) - - retry = retries.AsyncRetry( - predicate=predicate, - on_error=on_error_fn, - timeout=operation_timeout, - initial=0.01, - multiplier=2, - maximum=60, - ) - # wrap rpc in retry logic - retry_wrapped = retry(self.client._gapic_client.mutate_row) - # convert RetryErrors from retry wrapper into DeadlineExceeded errors - deadline_wrapped = _convert_retry_deadline( - retry_wrapped, operation_timeout, transient_errors - ) - metadata = _make_metadata(self.table_name, self.app_profile_id) - # trigger rpc - await deadline_wrapped(request, timeout=per_request_timeout, metadata=metadata) + klass = _create_gapic_client( + bigtable_admin_v2.BigtableTableAdminClient, + client_options=self._admin_client_options, + transport=transport, + ) + self._table_admin_client = klass(self) + return self._table_admin_client - async def bulk_mutate_rows( - self, - mutation_entries: list[RowMutationEntry], - *, - operation_timeout: float | None = 60, - per_request_timeout: float | None = None, - ): - """ - Applies mutations for multiple rows in a single batched request. - - Each individual RowMutationEntry is applied atomically, but separate entries - may be applied in arbitrary order (even for entries targetting the same row) - In total, the row_mutations can contain at most 100000 individual mutations - across all entries - - Idempotent entries (i.e., entries with mutations with explicit timestamps) - will be retried on failure. Non-idempotent will not, and will reported in a - raised exception group - - Args: - - mutation_entries: the batches of mutations to apply - Each entry will be applied atomically, but entries will be applied - in arbitrary order - - operation_timeout: the time budget for the entire operation, in seconds. - Failed requests will be retried within the budget. - time is only counted while actively waiting on the network. - DeadlineExceeded exception raised after timeout - - per_request_timeout: the time budget for an individual network request, - in seconds. 
If it takes longer than this time to complete, the request - will be cancelled with a DeadlineExceeded exception, and a retry will - be attempted if within operation_timeout budget - Raises: - - MutationsExceptionGroup if one or more mutations fails - Contains details about any failed entries in .exceptions - """ - operation_timeout = operation_timeout or self.default_operation_timeout - per_request_timeout = per_request_timeout or self.default_per_request_timeout - - if operation_timeout <= 0: - raise ValueError("operation_timeout must be greater than 0") - if per_request_timeout is not None and per_request_timeout <= 0: - raise ValueError("per_request_timeout must be greater than 0") - if per_request_timeout is not None and per_request_timeout > operation_timeout: - raise ValueError("per_request_timeout must be less than operation_timeout") - - operation = _MutateRowsOperation( - self.client._gapic_client, - self, - mutation_entries, - operation_timeout, - per_request_timeout, - ) - await operation.start() + @property + def instance_admin_client(self): + """Getter for the gRPC stub used for the Table Admin API. - async def check_and_mutate_row( - self, - row_key: str | bytes, - predicate: RowFilter | dict[str, Any] | None, - *, - true_case_mutations: Mutation | list[Mutation] | None = None, - false_case_mutations: Mutation | list[Mutation] | None = None, - operation_timeout: int | float | None = 20, - ) -> bool: - """ - Mutates a row atomically based on the output of a predicate filter - - Non-idempotent operation: will not be retried - - Args: - - row_key: the key of the row to mutate - - predicate: the filter to be applied to the contents of the specified row. - Depending on whether or not any results are yielded, - either true_case_mutations or false_case_mutations will be executed. - If None, checks that the row contains any values at all. - - true_case_mutations: - Changes to be atomically applied to the specified row if - predicate yields at least one cell when - applied to row_key. Entries are applied in order, - meaning that earlier mutations can be masked by later - ones. Must contain at least one entry if - false_case_mutations is empty, and at most 100000. - - false_case_mutations: - Changes to be atomically applied to the specified row if - predicate_filter does not yield any cells when - applied to row_key. Entries are applied in order, - meaning that earlier mutations can be masked by later - ones. Must contain at least one entry if - `true_case_mutations is empty, and at most 100000. - - operation_timeout: the time budget for the entire operation, in seconds. - Failed requests will not be retried. - Returns: - - bool indicating whether the predicate was true or false - Raises: - - GoogleAPIError exceptions from grpc call + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_api_instance_admin_client] + :end-before: [END bigtable_api_instance_admin_client] + :dedent: 4 + + :rtype: :class:`.bigtable_admin_pb2.BigtableInstanceAdmin` + :returns: A BigtableInstanceAdmin instance. + :raises: :class:`ValueError ` if the current + client is not an admin client or if it has not been + :meth:`start`-ed. 
""" - operation_timeout = operation_timeout or self.default_operation_timeout - if operation_timeout <= 0: - raise ValueError("operation_timeout must be greater than 0") - row_key = row_key.encode("utf-8") if isinstance(row_key, str) else row_key - if true_case_mutations is not None and not isinstance( - true_case_mutations, list - ): - true_case_mutations = [true_case_mutations] - true_case_dict = [m._to_dict() for m in true_case_mutations or []] - if false_case_mutations is not None and not isinstance( - false_case_mutations, list - ): - false_case_mutations = [false_case_mutations] - false_case_dict = [m._to_dict() for m in false_case_mutations or []] - if predicate is not None and not isinstance(predicate, dict): - predicate = predicate.to_dict() - metadata = _make_metadata(self.table_name, self.app_profile_id) - result = await self.client._gapic_client.check_and_mutate_row( - request={ - "predicate_filter": predicate, - "true_mutations": true_case_dict, - "false_mutations": false_case_dict, - "table_name": self.table_name, - "row_key": row_key, - "app_profile_id": self.app_profile_id, - }, - metadata=metadata, - timeout=operation_timeout, + if self._instance_admin_client is None: + if not self._admin: + raise ValueError("Client is not an admin client.") + + transport = self._create_gapic_client_channel( + bigtable_admin_v2.BigtableInstanceAdminClient, + BigtableInstanceAdminGrpcTransport, + ) + klass = _create_gapic_client( + bigtable_admin_v2.BigtableInstanceAdminClient, + client_options=self._admin_client_options, + transport=transport, + ) + self._instance_admin_client = klass(self) + return self._instance_admin_client + + def instance(self, instance_id, display_name=None, instance_type=None, labels=None): + """Factory to create a instance associated with this client. + + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_api_create_prod_instance] + :end-before: [END bigtable_api_create_prod_instance] + :dedent: 4 + + :type instance_id: str + :param instance_id: The ID of the instance. + + :type display_name: str + :param display_name: (Optional) The display name for the instance in + the Cloud Console UI. (Must be between 4 and 30 + characters.) If this value is not set in the + constructor, will fall back to the instance ID. + + :type instance_type: int + :param instance_type: (Optional) The type of the instance. + Possible values are represented + by the following constants: + :data:`google.cloud.bigtable.instance.InstanceType.PRODUCTION`. + :data:`google.cloud.bigtable.instance.InstanceType.DEVELOPMENT`, + Defaults to + :data:`google.cloud.bigtable.instance.InstanceType.UNSPECIFIED`. + + :type labels: dict + :param labels: (Optional) Labels are a flexible and lightweight + mechanism for organizing cloud resources into groups + that reflect a customer's organizational needs and + deployment strategies. They can be used to filter + resources and aggregate metrics. Label keys must be + between 1 and 63 characters long. Maximum 64 labels can + be associated with a given resource. Label values must + be between 0 and 63 characters long. Keys and values + must both be under 128 bytes. + + :rtype: :class:`~google.cloud.bigtable.instance.Instance` + :returns: an instance owned by this client. 
+ """ + return Instance( + instance_id, + self, + display_name=display_name, + instance_type=instance_type, + labels=labels, ) - return result.predicate_matched - async def read_modify_write_row( - self, - row_key: str | bytes, - rules: ReadModifyWriteRule | list[ReadModifyWriteRule], - *, - operation_timeout: int | float | None = 20, - ) -> Row: - """ - Reads and modifies a row atomically according to input ReadModifyWriteRules, - and returns the contents of all modified cells + def list_instances(self): + """List instances owned by the project. - The new value for the timestamp is the greater of the existing timestamp or - the current server time. + For example: - Non-idempotent operation: will not be retried + .. literalinclude:: snippets.py + :start-after: [START bigtable_api_list_instances] + :end-before: [END bigtable_api_list_instances] + :dedent: 4 - Args: - - row_key: the key of the row to apply read/modify/write rules to - - rules: A rule or set of rules to apply to the row. - Rules are applied in order, meaning that earlier rules will affect the - results of later ones. - - operation_timeout: the time budget for the entire operation, in seconds. - Failed requests will not be retried. - Returns: - - Row: containing cell data that was modified as part of the - operation - Raises: - - GoogleAPIError exceptions from grpc call + :rtype: tuple + :returns: + (instances, failed_locations), where 'instances' is list of + :class:`google.cloud.bigtable.instance.Instance`, and + 'failed_locations' is a list of locations which could not + be resolved. """ - operation_timeout = operation_timeout or self.default_operation_timeout - row_key = row_key.encode("utf-8") if isinstance(row_key, str) else row_key - if operation_timeout <= 0: - raise ValueError("operation_timeout must be greater than 0") - if rules is not None and not isinstance(rules, list): - rules = [rules] - if not rules: - raise ValueError("rules must contain at least one item") - # concert to dict representation - rules_dict = [rule._to_dict() for rule in rules] - metadata = _make_metadata(self.table_name, self.app_profile_id) - result = await self.client._gapic_client.read_modify_write_row( - request={ - "rules": rules_dict, - "table_name": self.table_name, - "row_key": row_key, - "app_profile_id": self.app_profile_id, - }, - metadata=metadata, - timeout=operation_timeout, + resp = self.instance_admin_client.list_instances( + request={"parent": self.project_path} ) - # construct Row from result - return Row._from_pb(result.row) + instances = [Instance.from_pb(instance, self) for instance in resp.instances] + return instances, resp.failed_locations - async def close(self): - """ - Called to close the Table instance and release any resources held by it. - """ - self._register_instance_task.cancel() - await self.client._remove_instance_registration(self.instance_id, self) + def list_clusters(self): + """List the clusters in the project. - async def __aenter__(self): - """ - Implement async context manager protocol + For example: - Ensure registration task has time to run, so that - grpc channels will be warmed for the specified instance - """ - await self._register_instance_task - return self + .. 
literalinclude:: snippets.py + :start-after: [START bigtable_api_list_clusters_in_project] + :end-before: [END bigtable_api_list_clusters_in_project] + :dedent: 4 - async def __aexit__(self, exc_type, exc_val, exc_tb): + :rtype: tuple + :returns: + (clusters, failed_locations), where 'clusters' is list of + :class:`google.cloud.bigtable.instance.Cluster`, and + 'failed_locations' is a list of strings representing + locations which could not be resolved. """ - Implement async context manager protocol - - Unregister this instance with the client, so that - grpc channels will no longer be warmed - """ - await self.close() + resp = self.instance_admin_client.list_clusters( + request={ + "parent": self.instance_admin_client.instance_path(self.project, "-") + } + ) + clusters = [] + instances = {} + for cluster in resp.clusters: + match_cluster_name = _CLUSTER_NAME_RE.match(cluster.name) + instance_id = match_cluster_name.group("instance") + if instance_id not in instances: + instances[instance_id] = self.instance(instance_id) + clusters.append(Cluster.from_pb(cluster, instances[instance_id])) + return clusters, resp.failed_locations diff --git a/google/cloud/bigtable/deprecated/cluster.py b/google/cloud/bigtable/cluster.py similarity index 95% rename from google/cloud/bigtable/deprecated/cluster.py rename to google/cloud/bigtable/cluster.py index b60d3503c..11fb5492d 100644 --- a/google/cloud/bigtable/deprecated/cluster.py +++ b/google/cloud/bigtable/cluster.py @@ -42,7 +42,7 @@ class Cluster(object): :type cluster_id: str :param cluster_id: The ID of the cluster. - :type instance: :class:`~google.cloud.bigtable.deprecated.instance.Instance` + :type instance: :class:`~google.cloud.bigtable.instance.Instance` :param instance: The instance where the cluster resides. :type location_id: str @@ -62,10 +62,10 @@ class Cluster(object): :param default_storage_type: (Optional) The type of storage Possible values are represented by the following constants: - :data:`google.cloud.bigtable.deprecated.enums.StorageType.SSD`. - :data:`google.cloud.bigtable.deprecated.enums.StorageType.HDD`, + :data:`google.cloud.bigtable.enums.StorageType.SSD`. + :data:`google.cloud.bigtable.enums.StorageType.HDD`, Defaults to - :data:`google.cloud.bigtable.deprecated.enums.StorageType.UNSPECIFIED`. + :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`. :type kms_key_name: str :param kms_key_name: (Optional, Creation Only) The name of the KMS customer managed @@ -84,11 +84,11 @@ class Cluster(object): :param _state: (`OutputOnly`) The current state of the cluster. Possible values are represented by the following constants: - :data:`google.cloud.bigtable.deprecated.enums.Cluster.State.NOT_KNOWN`. - :data:`google.cloud.bigtable.deprecated.enums.Cluster.State.READY`. - :data:`google.cloud.bigtable.deprecated.enums.Cluster.State.CREATING`. - :data:`google.cloud.bigtable.deprecated.enums.Cluster.State.RESIZING`. - :data:`google.cloud.bigtable.deprecated.enums.Cluster.State.DISABLED`. + :data:`google.cloud.bigtable.enums.Cluster.State.NOT_KNOWN`. + :data:`google.cloud.bigtable.enums.Cluster.State.READY`. + :data:`google.cloud.bigtable.enums.Cluster.State.CREATING`. + :data:`google.cloud.bigtable.enums.Cluster.State.RESIZING`. + :data:`google.cloud.bigtable.enums.Cluster.State.DISABLED`. :type min_serve_nodes: int :param min_serve_nodes: (Optional) The minimum number of nodes to be set in the cluster for autoscaling. 
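The `list_clusters` body added above groups each returned cluster under the instance id parsed out of its resource name via `_CLUSTER_NAME_RE`. That pattern is defined elsewhere in the client module and is not shown in this hunk; the regex below is a hypothetical stand-in used only to illustrate the parsing step.

import re

# Hypothetical stand-in for _CLUSTER_NAME_RE; the real pattern lives in the client module.
_CLUSTER_NAME_RE = re.compile(
    r"^projects/(?P<project>[^/]+)/instances/(?P<instance>[^/]+)/clusters/(?P<cluster_id>[^/]+)$"
)

def instance_id_from_cluster_name(cluster_name: str) -> str:
    # e.g. "projects/my-project/instances/my-instance/clusters/my-cluster" -> "my-instance"
    match = _CLUSTER_NAME_RE.match(cluster_name)
    if match is None:
        raise ValueError(f"unexpected cluster name: {cluster_name!r}")
    return match.group("instance")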
@@ -150,7 +150,7 @@ def from_pb(cls, cluster_pb, instance): :type cluster_pb: :class:`instance.Cluster` :param cluster_pb: An instance protobuf object. - :type instance: :class:`google.cloud.bigtable.deprecated.instance.Instance` + :type instance: :class:`google.cloud.bigtable.instance.Instance` :param instance: The instance that owns the cluster. :rtype: :class:`Cluster` @@ -236,7 +236,7 @@ def name(self): @property def state(self): - """google.cloud.bigtable.deprecated.enums.Cluster.State: state of cluster. + """google.cloud.bigtable.enums.Cluster.State: state of cluster. For example: diff --git a/google/cloud/bigtable/deprecated/column_family.py b/google/cloud/bigtable/column_family.py similarity index 99% rename from google/cloud/bigtable/deprecated/column_family.py rename to google/cloud/bigtable/column_family.py index 3d4c1a642..80232958d 100644 --- a/google/cloud/bigtable/deprecated/column_family.py +++ b/google/cloud/bigtable/column_family.py @@ -195,7 +195,7 @@ class ColumnFamily(object): :param column_family_id: The ID of the column family. Must be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - :type table: :class:`Table ` + :type table: :class:`Table ` :param table: The table that owns the column family. :type gc_rule: :class:`GarbageCollectionRule` diff --git a/google/cloud/bigtable/data/__init__.py b/google/cloud/bigtable/data/__init__.py new file mode 100644 index 000000000..c68e78c6f --- /dev/null +++ b/google/cloud/bigtable/data/__init__.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from typing import List, Tuple + +from google.cloud.bigtable import gapic_version as package_version + +from google.cloud.bigtable.data._async.client import BigtableDataClientAsync +from google.cloud.bigtable.data._async.client import TableAsync +from google.cloud.bigtable.data._async._read_rows import ReadRowsAsyncIterator +from google.cloud.bigtable.data._async.mutations_batcher import MutationsBatcherAsync + +from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery +from google.cloud.bigtable.data.read_rows_query import RowRange +from google.cloud.bigtable.data.row import Row +from google.cloud.bigtable.data.row import Cell + +from google.cloud.bigtable.data.mutations import Mutation +from google.cloud.bigtable.data.mutations import RowMutationEntry +from google.cloud.bigtable.data.mutations import SetCell +from google.cloud.bigtable.data.mutations import DeleteRangeFromColumn +from google.cloud.bigtable.data.mutations import DeleteAllFromFamily +from google.cloud.bigtable.data.mutations import DeleteAllFromRow + +from google.cloud.bigtable.data.exceptions import IdleTimeout +from google.cloud.bigtable.data.exceptions import InvalidChunk +from google.cloud.bigtable.data.exceptions import FailedMutationEntryError +from google.cloud.bigtable.data.exceptions import FailedQueryShardError + +from google.cloud.bigtable.data.exceptions import RetryExceptionGroup +from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup +from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + +# Type alias for the output of sample_keys +RowKeySamples = List[Tuple[bytes, int]] +# type alias for the output of query.shard() +ShardedQuery = List[ReadRowsQuery] + +__version__: str = package_version.__version__ + +__all__ = ( + "BigtableDataClientAsync", + "TableAsync", + "RowKeySamples", + "ReadRowsQuery", + "RowRange", + "MutationsBatcherAsync", + "Mutation", + "RowMutationEntry", + "SetCell", + "DeleteRangeFromColumn", + "DeleteAllFromFamily", + "DeleteAllFromRow", + "Row", + "Cell", + "ReadRowsAsyncIterator", + "IdleTimeout", + "InvalidChunk", + "FailedMutationEntryError", + "FailedQueryShardError", + "RetryExceptionGroup", + "MutationsExceptionGroup", + "ShardedReadRowsExceptionGroup", + "ShardedQuery", +) diff --git a/google/cloud/bigtable/data/_async/__init__.py b/google/cloud/bigtable/data/_async/__init__.py new file mode 100644 index 000000000..1e92e58dc --- /dev/null +++ b/google/cloud/bigtable/data/_async/__init__.py @@ -0,0 +1,26 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
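The `google.cloud.bigtable.data` package above defines the public surface of the new async data client. A rough usage sketch of the exported request types follows; the constructor keyword names are assumptions inferred from the exported class names, and the ids and values are placeholders.

from google.cloud.bigtable.data import (
    ReadRowsQuery,
    RowRange,
    RowMutationEntry,
    SetCell,
    DeleteAllFromFamily,
)

# a query covering a contiguous key range (keyword names assumed)
query = ReadRowsQuery(row_ranges=RowRange(start_key=b"user#000", end_key=b"user#999"))

# a batch entry that applies two mutations atomically to a single row
entry = RowMutationEntry(
    b"user#001",
    [SetCell("profile", b"name", b"Ada"), DeleteAllFromFamily("stale_family")],
)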
+ +from google.cloud.bigtable.data._async.client import BigtableDataClientAsync +from google.cloud.bigtable.data._async.client import TableAsync +from google.cloud.bigtable.data._async._read_rows import ReadRowsAsyncIterator +from google.cloud.bigtable.data._async.mutations_batcher import MutationsBatcherAsync + + +__all__ = [ + "BigtableDataClientAsync", + "TableAsync", + "ReadRowsAsyncIterator", + "MutationsBatcherAsync", +] diff --git a/google/cloud/bigtable/_mutate_rows.py b/google/cloud/bigtable/data/_async/_mutate_rows.py similarity index 91% rename from google/cloud/bigtable/_mutate_rows.py rename to google/cloud/bigtable/data/_async/_mutate_rows.py index e34ebaeb6..ac491adaf 100644 --- a/google/cloud/bigtable/_mutate_rows.py +++ b/google/cloud/bigtable/data/_async/_mutate_rows.py @@ -19,31 +19,23 @@ from google.api_core import exceptions as core_exceptions from google.api_core import retry_async as retries -import google.cloud.bigtable.exceptions as bt_exceptions -from google.cloud.bigtable._helpers import _make_metadata -from google.cloud.bigtable._helpers import _convert_retry_deadline -from google.cloud.bigtable._helpers import _attempt_timeout_generator +import google.cloud.bigtable.data.exceptions as bt_exceptions +from google.cloud.bigtable.data._helpers import _make_metadata +from google.cloud.bigtable.data._helpers import _convert_retry_deadline +from google.cloud.bigtable.data._helpers import _attempt_timeout_generator + +# mutate_rows requests are limited to this number of mutations +from google.cloud.bigtable.data.mutations import MUTATE_ROWS_REQUEST_MUTATION_LIMIT if TYPE_CHECKING: from google.cloud.bigtable_v2.services.bigtable.async_client import ( BigtableAsyncClient, ) - from google.cloud.bigtable.client import Table - from google.cloud.bigtable.mutations import RowMutationEntry - -# mutate_rows requests are limited to this value -MUTATE_ROWS_REQUEST_MUTATION_LIMIT = 100_000 - - -class _MutateRowsIncomplete(RuntimeError): - """ - Exception raised when a mutate_rows call has unfinished work. - """ - - pass + from google.cloud.bigtable.data.mutations import RowMutationEntry + from google.cloud.bigtable.data._async.client import TableAsync -class _MutateRowsOperation: +class _MutateRowsOperationAsync: """ MutateRowsOperation manages the logic of sending a set of row mutations, and retrying on failed entries. 
It manages this using the _run_attempt @@ -57,7 +49,7 @@ class _MutateRowsOperation: def __init__( self, gapic_client: "BigtableAsyncClient", - table: "Table", + table: "TableAsync", mutation_entries: list["RowMutationEntry"], operation_timeout: float, per_request_timeout: float | None, @@ -93,7 +85,7 @@ def __init__( core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable, # Entry level errors - _MutateRowsIncomplete, + bt_exceptions._MutateRowsIncomplete, ) # build retryable operation retry = retries.AsyncRetry( @@ -199,7 +191,7 @@ async def _run_attempt(self): # check if attempt succeeded, or needs to be retried if self.remaining_indices: # unfinished work; raise exception to trigger retry - raise _MutateRowsIncomplete + raise bt_exceptions._MutateRowsIncomplete def _handle_entry_error(self, idx: int, exc: Exception): """ diff --git a/google/cloud/bigtable/data/_async/_read_rows.py b/google/cloud/bigtable/data/_async/_read_rows.py new file mode 100644 index 000000000..910a01c4c --- /dev/null +++ b/google/cloud/bigtable/data/_async/_read_rows.py @@ -0,0 +1,403 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import ( + List, + Any, + AsyncIterable, + AsyncIterator, + AsyncGenerator, + Iterator, + Callable, + Awaitable, +) +import sys +import time +import asyncio +from functools import partial +from grpc.aio import RpcContext + +from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse +from google.cloud.bigtable_v2.services.bigtable.async_client import BigtableAsyncClient +from google.cloud.bigtable.data.row import Row, _LastScannedRow +from google.cloud.bigtable.data.exceptions import InvalidChunk +from google.cloud.bigtable.data.exceptions import _RowSetComplete +from google.cloud.bigtable.data.exceptions import IdleTimeout +from google.cloud.bigtable.data._read_rows_state_machine import _StateMachine +from google.api_core import retry_async as retries +from google.api_core import exceptions as core_exceptions +from google.cloud.bigtable.data._helpers import _make_metadata +from google.cloud.bigtable.data._helpers import _attempt_timeout_generator +from google.cloud.bigtable.data._helpers import _convert_retry_deadline + + +class _ReadRowsOperationAsync(AsyncIterable[Row]): + """ + ReadRowsOperation handles the logic of merging chunks from a ReadRowsResponse stream + into a stream of Row objects. + + ReadRowsOperation.merge_row_response_stream takes in a stream of ReadRowsResponse + and turns them into a stream of Row objects using an internal + StateMachine. + + ReadRowsOperation(request, client) handles row merging logic end-to-end, including + performing retries on stream errors. 
+ """ + + def __init__( + self, + request: dict[str, Any], + client: BigtableAsyncClient, + *, + operation_timeout: float = 600.0, + per_request_timeout: float | None = None, + ): + """ + Args: + - request: the request dict to send to the Bigtable API + - client: the Bigtable client to use to make the request + - operation_timeout: the timeout to use for the entire operation, in seconds + - per_request_timeout: the timeout to use when waiting for each individual grpc request, in seconds + If not specified, defaults to operation_timeout + """ + self._last_emitted_row_key: bytes | None = None + self._emit_count = 0 + self._request = request + self.operation_timeout = operation_timeout + # use generator to lower per-attempt timeout as we approach operation_timeout deadline + attempt_timeout_gen = _attempt_timeout_generator( + per_request_timeout, operation_timeout + ) + row_limit = request.get("rows_limit", 0) + # lock in paramters for retryable wrapper + self._partial_retryable = partial( + self._read_rows_retryable_attempt, + client.read_rows, + attempt_timeout_gen, + row_limit, + ) + predicate = retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + core_exceptions.Aborted, + ) + + def on_error_fn(exc): + if predicate(exc): + self.transient_errors.append(exc) + + retry = retries.AsyncRetry( + predicate=predicate, + timeout=self.operation_timeout, + initial=0.01, + multiplier=2, + maximum=60, + on_error=on_error_fn, + is_stream=True, + ) + self._stream: AsyncGenerator[Row, None] | None = retry( + self._partial_retryable + )() + # contains the list of errors that were retried + self.transient_errors: List[Exception] = [] + + def __aiter__(self) -> AsyncIterator[Row]: + """Implements the AsyncIterable interface""" + return self + + async def __anext__(self) -> Row: + """Implements the AsyncIterator interface""" + if self._stream is not None: + return await self._stream.__anext__() + else: + raise asyncio.InvalidStateError("stream is closed") + + async def aclose(self): + """Close the stream and release resources""" + if self._stream is not None: + await self._stream.aclose() + self._stream = None + self._emitted_seen_row_key = None + + async def _read_rows_retryable_attempt( + self, + gapic_fn: Callable[..., Awaitable[AsyncIterable[ReadRowsResponse]]], + timeout_generator: Iterator[float], + total_row_limit: int, + ) -> AsyncGenerator[Row, None]: + """ + Retryable wrapper for merge_rows. This function is called each time + a retry is attempted. 
+ + Some fresh state is created on each retry: + - grpc network stream + - state machine to hold merge chunks received from stream + Some state is shared between retries: + - _last_emitted_row_key is used to ensure that + duplicate rows are not emitted + - request is stored and (potentially) modified on each retry + """ + if self._last_emitted_row_key is not None: + # if this is a retry, try to trim down the request to avoid ones we've already processed + try: + self._request["rows"] = _ReadRowsOperationAsync._revise_request_rowset( + row_set=self._request.get("rows", None), + last_seen_row_key=self._last_emitted_row_key, + ) + except _RowSetComplete: + # if there are no rows left to process, we're done + # This is not expected to happen often, but could occur if + # a retry is triggered quickly after the last row is emitted + return + # revise next request's row limit based on number emitted + if total_row_limit: + new_limit = total_row_limit - self._emit_count + if new_limit == 0: + # we have hit the row limit, so we're done + return + elif new_limit < 0: + raise RuntimeError("unexpected state: emit count exceeds row limit") + else: + self._request["rows_limit"] = new_limit + metadata = _make_metadata( + self._request.get("table_name", None), + self._request.get("app_profile_id", None), + ) + new_gapic_stream: RpcContext = await gapic_fn( + self._request, + timeout=next(timeout_generator), + metadata=metadata, + ) + try: + state_machine = _StateMachine() + stream = _ReadRowsOperationAsync.merge_row_response_stream( + new_gapic_stream, state_machine + ) + # run until we get a timeout or the stream is exhausted + async for new_item in stream: + if ( + self._last_emitted_row_key is not None + and new_item.row_key <= self._last_emitted_row_key + ): + raise InvalidChunk("Last emitted row key out of order") + # don't yield _LastScannedRow markers; they + # should only update last_seen_row_key + if not isinstance(new_item, _LastScannedRow): + yield new_item + self._emit_count += 1 + self._last_emitted_row_key = new_item.row_key + if total_row_limit and self._emit_count >= total_row_limit: + return + except (Exception, GeneratorExit) as exc: + # ensure grpc stream is closed + new_gapic_stream.cancel() + raise exc + + @staticmethod + def _revise_request_rowset( + row_set: dict[str, Any] | None, + last_seen_row_key: bytes, + ) -> dict[str, Any]: + """ + Revise the rows in the request to avoid ones we've already processed.
+ + Args: + - row_set: the row set from the request + - last_seen_row_key: the last row key encountered + Raises: + - _RowSetComplete: if there are no rows left to process after the revision + """ + # if user is doing a whole table scan, start a new one with the last seen key + if row_set is None or ( + len(row_set.get("row_ranges", [])) == 0 + and len(row_set.get("row_keys", [])) == 0 + ): + last_seen = last_seen_row_key + return { + "row_keys": [], + "row_ranges": [{"start_key_open": last_seen}], + } + # remove seen keys from user-specific key list + row_keys: list[bytes] = row_set.get("row_keys", []) + adjusted_keys = [k for k in row_keys if k > last_seen_row_key] + # adjust ranges to ignore keys before last seen + row_ranges: list[dict[str, Any]] = row_set.get("row_ranges", []) + adjusted_ranges = [] + for row_range in row_ranges: + end_key = row_range.get("end_key_closed", None) or row_range.get( + "end_key_open", None + ) + if end_key is None or end_key > last_seen_row_key: + # end range is after last seen key + new_range = row_range.copy() + start_key = row_range.get("start_key_closed", None) or row_range.get( + "start_key_open", None + ) + if start_key is None or start_key <= last_seen_row_key: + # replace start key with last seen + new_range["start_key_open"] = last_seen_row_key + new_range.pop("start_key_closed", None) + adjusted_ranges.append(new_range) + if len(adjusted_keys) == 0 and len(adjusted_ranges) == 0: + # if the query is empty after revision, raise an exception + # this will avoid an unwanted full table scan + raise _RowSetComplete() + return {"row_keys": adjusted_keys, "row_ranges": adjusted_ranges} + + @staticmethod + async def merge_row_response_stream( + response_generator: AsyncIterable[ReadRowsResponse], + state_machine: _StateMachine, + ) -> AsyncGenerator[Row, None]: + """ + Consume chunks from a ReadRowsResponse stream into a set of Rows + + Args: + - response_generator: AsyncIterable of ReadRowsResponse objects. Typically + this is a stream of chunks from the Bigtable API + Returns: + - AsyncGenerator of Rows + Raises: + - InvalidChunk: if the chunk stream is invalid + """ + async for row_response in response_generator: + # unwrap protoplus object for increased performance + response_pb = row_response._pb + last_scanned = response_pb.last_scanned_row_key + # if the server sends a scan heartbeat, notify the state machine. + if last_scanned: + yield state_machine.handle_last_scanned_row(last_scanned) + # process new chunks through the state machine. + for chunk in response_pb.chunks: + complete_row = state_machine.handle_chunk(chunk) + if complete_row is not None: + yield complete_row + # TODO: handle request stats + if not state_machine.is_terminal_state(): + # read rows is complete, but there's still data in the merger + raise InvalidChunk("read_rows completed with partial state remaining") + + +class ReadRowsAsyncIterator(AsyncIterable[Row]): + """ + Async iterator for ReadRows responses. 
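`_revise_request_rowset` above trims a retried request down to the rows that have not been emitted yet. A small sketch of that behavior, calling the static method directly (it is private, so this is purely illustrative):

from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync

original_row_set = {
    "row_keys": [b"a", b"m", b"z"],
    "row_ranges": [{"start_key_closed": b"a", "end_key_open": b"q"}],
}
revised = _ReadRowsOperationAsync._revise_request_rowset(
    original_row_set, last_seen_row_key=b"m"
)
# keys <= b"m" are dropped and the surviving range now starts after b"m":
# {'row_keys': [b'z'], 'row_ranges': [{'end_key_open': b'q', 'start_key_open': b'm'}]}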
+ + Supports the AsyncIterator protocol for use in async for loops, + along with: + - `aclose` for closing the underlying stream + - `active` for checking if the iterator is still active + - an internal idle timer for closing the stream after a period of inactivity + """ + + def __init__(self, merger: _ReadRowsOperationAsync): + self._merger: _ReadRowsOperationAsync = merger + self._error: Exception | None = None + self._last_interaction_time = time.monotonic() + self._idle_timeout_task: asyncio.Task[None] | None = None + # wrap merger with a wrapper that properly formats exceptions + self._next_fn = _convert_retry_deadline( + self._merger.__anext__, + self._merger.operation_timeout, + self._merger.transient_errors, + ) + + async def _start_idle_timer(self, idle_timeout: float): + """ + Start a coroutine that will cancel a stream if no interaction + with the iterator occurs for the specified number of seconds. + + Subsequent access to the iterator will raise an IdleTimeout exception. + + Args: + - idle_timeout: number of seconds of inactivity before cancelling the stream + """ + self._last_interaction_time = time.monotonic() + if self._idle_timeout_task is not None: + self._idle_timeout_task.cancel() + self._idle_timeout_task = asyncio.create_task( + self._idle_timeout_coroutine(idle_timeout) + ) + if sys.version_info >= (3, 8): + self._idle_timeout_task.set_name(f"{self.__class__.__name__}.idle_timeout") + + @property + def active(self): + """ + Returns True if the iterator is still active and has not been closed + """ + return self._error is None + + async def _idle_timeout_coroutine(self, idle_timeout: float): + """ + Coroutine that will cancel a stream if there has been no interaction + with the iterator in the last `idle_timeout` seconds. + """ + while self.active: + next_timeout = self._last_interaction_time + idle_timeout + await asyncio.sleep(next_timeout - time.monotonic()) + if ( + self._last_interaction_time + idle_timeout < time.monotonic() + and self.active + ): + # idle timeout has expired + await self._finish_with_error( + IdleTimeout( + ( + "Timed out waiting for next Row to be consumed. " + f"(idle_timeout={idle_timeout:0.1f}s)" + ) + ) + ) + + def __aiter__(self): + """Implement the async iterator protocol.""" + return self + + async def __anext__(self) -> Row: + """ + Implement the async iterator protocol. + + Return the next item in the stream if active, or + raise an exception if the stream has been closed. + """ + if self._error is not None: + raise self._error + try: + self._last_interaction_time = time.monotonic() + return await self._next_fn() + except Exception as e: + await self._finish_with_error(e) + raise e + + async def _finish_with_error(self, e: Exception): + """ + Helper function to close the stream and clean up resources + after an error has occurred.
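A short sketch of how a caller is expected to consume the iterator described above: iterate with `async for`, and call `aclose()` when stopping early so the idle-timeout task and the underlying stream are released. Here `table` and `query` stand in for a `TableAsync` and a `ReadRowsQuery`.

async def first_n_rows(table, query, n=10):
    # table.read_rows_stream returns a ReadRowsAsyncIterator
    row_iterator = await table.read_rows_stream(query)
    rows = []
    try:
        async for row in row_iterator:
            rows.append(row)
            if len(rows) >= n:
                break
    finally:
        # cancels the idle-timeout task and closes the merger/stream
        await row_iterator.aclose()
    return rows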
+ """ + if self.active: + await self._merger.aclose() + self._error = e + if self._idle_timeout_task is not None: + self._idle_timeout_task.cancel() + self._idle_timeout_task = None + + async def aclose(self): + """ + Support closing the stream with an explicit call to aclose() + """ + await self._finish_with_error( + StopAsyncIteration(f"{self.__class__.__name__} closed") + ) diff --git a/google/cloud/bigtable/data/_async/client.py b/google/cloud/bigtable/data/_async/client.py new file mode 100644 index 000000000..3a5831799 --- /dev/null +++ b/google/cloud/bigtable/data/_async/client.py @@ -0,0 +1,1091 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from __future__ import annotations + +from typing import ( + cast, + Any, + Optional, + Set, + TYPE_CHECKING, +) + +import asyncio +import grpc +import time +import warnings +import sys +import random + +from collections import namedtuple + +from google.cloud.bigtable_v2.services.bigtable.client import BigtableClientMeta +from google.cloud.bigtable_v2.services.bigtable.async_client import BigtableAsyncClient +from google.cloud.bigtable_v2.services.bigtable.async_client import DEFAULT_CLIENT_INFO +from google.cloud.bigtable_v2.services.bigtable.transports.pooled_grpc_asyncio import ( + PooledBigtableGrpcAsyncIOTransport, +) +from google.cloud.bigtable_v2.types.bigtable import PingAndWarmRequest +from google.cloud.client import ClientWithProject +from google.api_core.exceptions import GoogleAPICallError +from google.api_core import retry_async as retries +from google.api_core import exceptions as core_exceptions +from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync +from google.cloud.bigtable.data._async._read_rows import ReadRowsAsyncIterator + +import google.auth.credentials +import google.auth._default +from google.api_core import client_options as client_options_lib +from google.cloud.bigtable.data.row import Row +from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery +from google.cloud.bigtable.data.exceptions import FailedQueryShardError +from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + +from google.cloud.bigtable.data.mutations import Mutation, RowMutationEntry +from google.cloud.bigtable.data._async._mutate_rows import _MutateRowsOperationAsync +from google.cloud.bigtable.data._helpers import _make_metadata +from google.cloud.bigtable.data._helpers import _convert_retry_deadline +from google.cloud.bigtable.data._async.mutations_batcher import MutationsBatcherAsync +from google.cloud.bigtable.data._async.mutations_batcher import _MB_SIZE +from google.cloud.bigtable.data._helpers import _attempt_timeout_generator + +from google.cloud.bigtable.data.read_modify_write_rules import ReadModifyWriteRule +from google.cloud.bigtable.data.row_filters import RowFilter +from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter +from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter +from 
google.cloud.bigtable.data.row_filters import RowFilterChain + +if TYPE_CHECKING: + from google.cloud.bigtable.data import RowKeySamples + from google.cloud.bigtable.data import ShardedQuery + +# used by read_rows_sharded to limit how many requests are attempted in parallel +CONCURRENCY_LIMIT = 10 + +# used to register instance data with the client for channel warming +_WarmedInstanceKey = namedtuple( + "_WarmedInstanceKey", ["instance_name", "table_name", "app_profile_id"] +) + + +class BigtableDataClientAsync(ClientWithProject): + def __init__( + self, + *, + project: str | None = None, + pool_size: int = 3, + credentials: google.auth.credentials.Credentials | None = None, + client_options: dict[str, Any] + | "google.api_core.client_options.ClientOptions" + | None = None, + ): + """ + Create a client instance for the Bigtable Data API + + Client should be created within an async context (running event loop) + + Args: + project: the project which the client acts on behalf of. + If not passed, falls back to the default inferred + from the environment. + pool_size: The number of grpc channels to maintain + in the internal channel pool. + credentials: + The OAuth2 Credentials to use for this + client. If not passed (and if no ``_http`` object is + passed), falls back to the default inferred from the + environment. + client_options (Optional[Union[dict, google.api_core.client_options.ClientOptions]]): + Client options used to set user options + on the client. API Endpoint should be set through client_options. + Raises: + - RuntimeError if called outside of an async context (no running event loop) + - ValueError if pool_size is less than 1 + """ + # set up transport in registry + transport_str = f"pooled_grpc_asyncio_{pool_size}" + transport = PooledBigtableGrpcAsyncIOTransport.with_fixed_size(pool_size) + BigtableClientMeta._transport_registry[transport_str] = transport + # set up client info headers for veneer library + client_info = DEFAULT_CLIENT_INFO + client_info.client_library_version = client_info.gapic_version + # parse client options + if type(client_options) is dict: + client_options = client_options_lib.from_dict(client_options) + client_options = cast( + Optional[client_options_lib.ClientOptions], client_options + ) + # initialize client + ClientWithProject.__init__( + self, + credentials=credentials, + project=project, + client_options=client_options, + ) + self._gapic_client = BigtableAsyncClient( + transport=transport_str, + credentials=credentials, + client_options=client_options, + client_info=client_info, + ) + self.transport = cast( + PooledBigtableGrpcAsyncIOTransport, self._gapic_client.transport + ) + # keep track of active instances for warmup on channel refresh + self._active_instances: Set[_WarmedInstanceKey] = set() + # keep track of table objects associated with each instance + # only remove instance from _active_instances when all associated tables remove it + self._instance_owners: dict[_WarmedInstanceKey, Set[int]] = {} + # attempt to start background tasks + self._channel_init_time = time.monotonic() + self._channel_refresh_tasks: list[asyncio.Task[None]] = [] + try: + self.start_background_channel_refresh() + except RuntimeError: + warnings.warn( + f"{self.__class__.__name__} should be started in an 
Channel refresh will not be started", + RuntimeWarning, + stacklevel=2, + ) + + def start_background_channel_refresh(self) -> None: + """ + Starts a background task to ping and warm each channel in the pool + Raises: + - RuntimeError if not called in an asyncio event loop + """ + if not self._channel_refresh_tasks: + # raise RuntimeError if there is no event loop + asyncio.get_running_loop() + for channel_idx in range(self.transport.pool_size): + refresh_task = asyncio.create_task(self._manage_channel(channel_idx)) + if sys.version_info >= (3, 8): + # task names supported in Python 3.8+ + refresh_task.set_name( + f"{self.__class__.__name__} channel refresh {channel_idx}" + ) + self._channel_refresh_tasks.append(refresh_task) + + async def close(self, timeout: float = 2.0): + """ + Cancel all background tasks + """ + for task in self._channel_refresh_tasks: + task.cancel() + group = asyncio.gather(*self._channel_refresh_tasks, return_exceptions=True) + await asyncio.wait_for(group, timeout=timeout) + await self.transport.close() + self._channel_refresh_tasks = [] + + async def _ping_and_warm_instances( + self, channel: grpc.aio.Channel, instance_key: _WarmedInstanceKey | None = None + ) -> list[GoogleAPICallError | None]: + """ + Prepares the backend for requests on a channel + + Pings each Bigtable instance registered in `_active_instances` on the client + + Args: + - channel: grpc channel to warm + - instance_key: if provided, only warm the instance associated with the key + Returns: + - sequence of results or exceptions from the ping requests + """ + instance_list = ( + [instance_key] if instance_key is not None else self._active_instances + ) + ping_rpc = channel.unary_unary( + "/google.bigtable.v2.Bigtable/PingAndWarm", + request_serializer=PingAndWarmRequest.serialize, + ) + # prepare list of coroutines to run + tasks = [ + ping_rpc( + request={"name": instance_name, "app_profile_id": app_profile_id}, + metadata=[ + ( + "x-goog-request-params", + f"name={instance_name}&app_profile_id={app_profile_id}", + ) + ], + wait_for_ready=True, + ) + for (instance_name, table_name, app_profile_id) in instance_list + ] + # execute coroutines in parallel + result_list = await asyncio.gather(*tasks, return_exceptions=True) + # return None in place of empty successful responses + return [r or None for r in result_list] + + async def _manage_channel( + self, + channel_idx: int, + refresh_interval_min: float = 60 * 35, + refresh_interval_max: float = 60 * 45, + grace_period: float = 60 * 10, + ) -> None: + """ + Background coroutine that periodically refreshes and warms a grpc channel + + The backend will automatically close channels after 60 minutes, so + `refresh_interval` + `grace_period` should be < 60 minutes + + Runs continuously until the client is closed + + Args: + channel_idx: index of the channel in the transport's channel pool + refresh_interval_min: minimum interval before initiating refresh + process in seconds. Actual interval will be a random value + between `refresh_interval_min` and `refresh_interval_max` + refresh_interval_max: maximum interval before initiating refresh + process in seconds. 
Actual interval will be a random value + between `refresh_interval_min` and `refresh_interval_max` + grace_period: time to allow previous channel to serve existing + requests before closing, in seconds + """ + first_refresh = self._channel_init_time + random.uniform( + refresh_interval_min, refresh_interval_max + ) + next_sleep = max(first_refresh - time.monotonic(), 0) + if next_sleep > 0: + # warm the current channel immediately + channel = self.transport.channels[channel_idx] + await self._ping_and_warm_instances(channel) + # continuously refresh the channel every `refresh_interval` seconds + while True: + await asyncio.sleep(next_sleep) + # prepare new channel for use + new_channel = self.transport.grpc_channel._create_channel() + await self._ping_and_warm_instances(new_channel) + # cycle channel out of use, with long grace window before closure + start_timestamp = time.time() + await self.transport.replace_channel( + channel_idx, grace=grace_period, swap_sleep=10, new_channel=new_channel + ) + # subtract the time spent waiting for the channel to be replaced + next_refresh = random.uniform(refresh_interval_min, refresh_interval_max) + next_sleep = next_refresh - (time.time() - start_timestamp) + + async def _register_instance(self, instance_id: str, owner: TableAsync) -> None: + """ + Registers an instance with the client, and warms the channel pool + for the instance + The client will periodically refresh grpc channel pool used to make + requests, and new channels will be warmed for each registered instance + Channels will not be refreshed unless at least one instance is registered + + Args: + - instance_id: id of the instance to register. + - owner: table that owns the instance. Owners will be tracked in + _instance_owners, and instances will only be unregistered when all + owners call _remove_instance_registration + """ + instance_name = self._gapic_client.instance_path(self.project, instance_id) + instance_key = _WarmedInstanceKey( + instance_name, owner.table_name, owner.app_profile_id + ) + self._instance_owners.setdefault(instance_key, set()).add(id(owner)) + if instance_name not in self._active_instances: + self._active_instances.add(instance_key) + if self._channel_refresh_tasks: + # refresh tasks already running + # call ping and warm on all existing channels + for channel in self.transport.channels: + await self._ping_and_warm_instances(channel, instance_key) + else: + # refresh tasks aren't active. start them as background tasks + self.start_background_channel_refresh() + + async def _remove_instance_registration( + self, instance_id: str, owner: TableAsync + ) -> bool: + """ + Removes an instance from the client's registered instances, to prevent + warming new channels for the instance + + If instance_id is not registered, or is still in use by other tables, returns False + + Args: + - instance_id: id of the instance to remove + - owner: table that owns the instance. 
Owners will be tracked in + _instance_owners, and instances will only be unregistered when all + owners call _remove_instance_registration + Returns: + - True if instance was removed + """ + instance_name = self._gapic_client.instance_path(self.project, instance_id) + instance_key = _WarmedInstanceKey( + instance_name, owner.table_name, owner.app_profile_id + ) + owner_list = self._instance_owners.get(instance_key, set()) + try: + owner_list.remove(id(owner)) + if len(owner_list) == 0: + self._active_instances.remove(instance_key) + return True + except KeyError: + return False + + # TODO: revisit timeouts https://github.com/googleapis/python-bigtable/issues/782 + def get_table( + self, + instance_id: str, + table_id: str, + app_profile_id: str | None = None, + default_operation_timeout: float = 600, + default_per_request_timeout: float | None = None, + ) -> TableAsync: + """ + Returns a table instance for making data API requests + + Args: + instance_id: The Bigtable instance ID to associate with this client. + instance_id is combined with the client's project to fully + specify the instance + table_id: The ID of the table. + app_profile_id: (Optional) The app profile to associate with requests. + https://cloud.google.com/bigtable/docs/app-profiles + """ + return TableAsync( + self, + instance_id, + table_id, + app_profile_id, + default_operation_timeout=default_operation_timeout, + default_per_request_timeout=default_per_request_timeout, + ) + + async def __aenter__(self): + self.start_background_channel_refresh() + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.close() + await self._gapic_client.__aexit__(exc_type, exc_val, exc_tb) + + +class TableAsync: + """ + Main Data API surface + + Table object maintains table_id, and app_profile_id context, and passes them with + each call + """ + + def __init__( + self, + client: BigtableDataClientAsync, + instance_id: str, + table_id: str, + app_profile_id: str | None = None, + *, + default_operation_timeout: float = 600, + default_per_request_timeout: float | None = None, + ): + """ + Initialize a Table instance + + Must be created within an async context (running event loop) + + Args: + instance_id: The Bigtable instance ID to associate with this client. + instance_id is combined with the client's project to fully + specify the instance + table_id: The ID of the table. table_id is combined with the + instance_id and the client's project to fully specify the table + app_profile_id: (Optional) The app profile to associate with requests. 
+ https://cloud.google.com/bigtable/docs/app-profiles + default_operation_timeout: (Optional) The default timeout, in seconds + default_per_request_timeout: (Optional) The default timeout for individual + rpc requests, in seconds + Raises: + - RuntimeError if called outside of an async context (no running event loop) + """ + # validate timeouts + if default_operation_timeout <= 0: + raise ValueError("default_operation_timeout must be greater than 0") + if default_per_request_timeout is not None and default_per_request_timeout <= 0: + raise ValueError("default_per_request_timeout must be greater than 0") + if ( + default_per_request_timeout is not None + and default_per_request_timeout > default_operation_timeout + ): + raise ValueError( + "default_per_request_timeout must be less than default_operation_timeout" + ) + self.client = client + self.instance_id = instance_id + self.instance_name = self.client._gapic_client.instance_path( + self.client.project, instance_id + ) + self.table_id = table_id + self.table_name = self.client._gapic_client.table_path( + self.client.project, instance_id, table_id + ) + self.app_profile_id = app_profile_id + + self.default_operation_timeout = default_operation_timeout + self.default_per_request_timeout = default_per_request_timeout + + # raises RuntimeError if called outside of an async context (no running event loop) + try: + self._register_instance_task = asyncio.create_task( + self.client._register_instance(instance_id, self) + ) + except RuntimeError as e: + raise RuntimeError( + f"{self.__class__.__name__} must be created within an async event loop context." + ) from e + + async def read_rows_stream( + self, + query: ReadRowsQuery | dict[str, Any], + *, + operation_timeout: float | None = None, + per_request_timeout: float | None = None, + ) -> ReadRowsAsyncIterator: + """ + Returns an iterator to asynchronously stream back row data. + + Failed requests within operation_timeout and operation_deadline policies will be retried. + + Args: + - query: contains details about which rows to return + - operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + time is only counted while actively waiting on the network. + If None, defaults to the Table's default_operation_timeout + - per_request_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. 
+ If None, defaults to the Table's default_per_request_timeout + + Returns: + - an asynchronous iterator that yields rows returned by the query + Raises: + - DeadlineExceeded: raised after operation timeout + will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions + from any retries that failed + - GoogleAPIError: raised if the request encounters an unrecoverable error + - IdleTimeout: if iterator was abandoned + """ + + operation_timeout = operation_timeout or self.default_operation_timeout + per_request_timeout = per_request_timeout or self.default_per_request_timeout + + if operation_timeout <= 0: + raise ValueError("operation_timeout must be greater than 0") + if per_request_timeout is not None and per_request_timeout <= 0: + raise ValueError("per_request_timeout must be greater than 0") + if per_request_timeout is not None and per_request_timeout > operation_timeout: + raise ValueError( + "per_request_timeout must not be greater than operation_timeout" + ) + if per_request_timeout is None: + per_request_timeout = operation_timeout + request = query._to_dict() if isinstance(query, ReadRowsQuery) else query + request["table_name"] = self.table_name + if self.app_profile_id: + request["app_profile_id"] = self.app_profile_id + + # read_rows smart retries is implemented using a series of iterators: + # - client.read_rows: outputs raw ReadRowsResponse objects from backend. Has per_request_timeout + # - ReadRowsOperation.merge_row_response_stream: parses chunks into rows + # - ReadRowsOperation.retryable_merge_rows: adds retries, caching, revised requests, per_request_timeout + # - ReadRowsAsyncIterator: adds idle_timeout, moves stats out of stream and into attribute + row_merger = _ReadRowsOperationAsync( + request, + self.client._gapic_client, + operation_timeout=operation_timeout, + per_request_timeout=per_request_timeout, + ) + output_generator = ReadRowsAsyncIterator(row_merger) + # add idle timeout to clear resources if generator is abandoned + idle_timeout_seconds = 300 + await output_generator._start_idle_timer(idle_timeout_seconds) + return output_generator + + async def read_rows( + self, + query: ReadRowsQuery | dict[str, Any], + *, + operation_timeout: float | None = None, + per_request_timeout: float | None = None, + ) -> list[Row]: + """ + Helper function that returns a full list instead of a generator + + See read_rows_stream + + Returns: + - a list of the rows returned by the query + """ + row_generator = await self.read_rows_stream( + query, + operation_timeout=operation_timeout, + per_request_timeout=per_request_timeout, + ) + results = [row async for row in row_generator] + return results + + async def read_row( + self, + row_key: str | bytes, + *, + row_filter: RowFilter | None = None, + operation_timeout: int | float | None = 60, + per_request_timeout: int | float | None = None, + ) -> Row | None: + """ + Helper function to return a single row + + See read_rows_stream + + Raises: + - google.cloud.bigtable.data.exceptions.RowNotFound: if the row does not exist + Returns: + - the individual row requested, or None if it does not exist + """ + if row_key is None: + raise ValueError("row_key must be string or bytes") + query = ReadRowsQuery(row_keys=row_key, row_filter=row_filter, limit=1) + results = await self.read_rows( + query, + operation_timeout=operation_timeout, + per_request_timeout=per_request_timeout, + ) + if len(results) == 0: + return None + return results[0] + + async def read_rows_sharded( + self, + sharded_query: ShardedQuery, + *, + 
operation_timeout: int | float | None = None, + per_request_timeout: int | float | None = None, + ) -> list[Row]: + """ + Runs a sharded query in parallel, then returns the results in a single list. + Results will be returned in the order of the input queries. + + This function is intended to be run on the results of a query.shard() call: + + ``` + table_shard_keys = await table.sample_row_keys() + query = ReadRowsQuery(...) + shard_queries = query.shard(table_shard_keys) + results = await table.read_rows_sharded(shard_queries) + ``` + + Args: + - sharded_query: a sharded query to execute + Raises: + - ShardedReadRowsExceptionGroup: if any of the queries failed + - ValueError: if the query_list is empty + """ + if not sharded_query: + raise ValueError("empty sharded_query") + # reduce operation_timeout between batches + operation_timeout = operation_timeout or self.default_operation_timeout + per_request_timeout = ( + per_request_timeout or self.default_per_request_timeout or operation_timeout + ) + timeout_generator = _attempt_timeout_generator( + operation_timeout, operation_timeout + ) + # submit shards in batches if the number of shards goes over CONCURRENCY_LIMIT + batched_queries = [ + sharded_query[i : i + CONCURRENCY_LIMIT] + for i in range(0, len(sharded_query), CONCURRENCY_LIMIT) + ] + # run batches and collect results + results_list = [] + error_dict = {} + shard_idx = 0 + for batch in batched_queries: + batch_operation_timeout = next(timeout_generator) + routine_list = [ + self.read_rows( + query, + operation_timeout=batch_operation_timeout, + per_request_timeout=min( + per_request_timeout, batch_operation_timeout + ), + ) + for query in batch + ] + batch_result = await asyncio.gather(*routine_list, return_exceptions=True) + for result in batch_result: + if isinstance(result, Exception): + error_dict[shard_idx] = result + else: + results_list.extend(result) + shard_idx += 1 + if error_dict: + # if any sub-request failed, raise an exception instead of returning results + raise ShardedReadRowsExceptionGroup( + [ + FailedQueryShardError(idx, sharded_query[idx], e) + for idx, e in error_dict.items() + ], + results_list, + len(sharded_query), + ) + return results_list + + async def row_exists( + self, + row_key: str | bytes, + *, + operation_timeout: int | float | None = 60, + per_request_timeout: int | float | None = None, + ) -> bool: + """ + Helper function to determine if a row exists + + uses the filters: chain(limit cells per row = 1, strip value) + + Returns: + - a bool indicating whether the row exists + """ + if row_key is None: + raise ValueError("row_key must be string or bytes") + strip_filter = StripValueTransformerFilter(flag=True) + limit_filter = CellsRowLimitFilter(1) + chain_filter = RowFilterChain(filters=[limit_filter, strip_filter]) + query = ReadRowsQuery(row_keys=row_key, limit=1, row_filter=chain_filter) + results = await self.read_rows( + query, + operation_timeout=operation_timeout, + per_request_timeout=per_request_timeout, + ) + return len(results) > 0 + + async def sample_row_keys( + self, + *, + operation_timeout: float | None = None, + per_request_timeout: float | None = None, + ) -> RowKeySamples: + """ + Return a set of RowKeySamples that delimit contiguous sections of the table of + approximately equal size + + RowKeySamples output can be used with ReadRowsQuery.shard() to create a sharded query that + can be parallelized across multiple backend nodes. read_rows and read_rows_stream + requests will call sample_row_keys internally for this purpose when
sharding is enabled. + + RowKeySamples is simply a type alias for list[tuple[bytes, int]]; a list of + row_keys, along with offset positions in the table + + Returns: + - a set of RowKeySamples that delimit contiguous sections of the table + Raises: + - GoogleAPICallError: if the sample_row_keys request fails + """ + # prepare timeouts + operation_timeout = operation_timeout or self.default_operation_timeout + per_request_timeout = per_request_timeout or self.default_per_request_timeout + + if operation_timeout <= 0: + raise ValueError("operation_timeout must be greater than 0") + if per_request_timeout is not None and per_request_timeout <= 0: + raise ValueError("per_request_timeout must be greater than 0") + if per_request_timeout is not None and per_request_timeout > operation_timeout: + raise ValueError( + "per_request_timeout must not be greater than operation_timeout" + ) + attempt_timeout_gen = _attempt_timeout_generator( + per_request_timeout, operation_timeout + ) + # prepare retryable + predicate = retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ) + transient_errors = [] + + def on_error_fn(exc): + # add errors to list if retryable + if predicate(exc): + transient_errors.append(exc) + + retry = retries.AsyncRetry( + predicate=predicate, + timeout=operation_timeout, + initial=0.01, + multiplier=2, + maximum=60, + on_error=on_error_fn, + is_stream=False, + ) + + # prepare request + metadata = _make_metadata(self.table_name, self.app_profile_id) + + async def execute_rpc(): + results = await self.client._gapic_client.sample_row_keys( + table_name=self.table_name, + app_profile_id=self.app_profile_id, + timeout=next(attempt_timeout_gen), + metadata=metadata, + ) + return [(s.row_key, s.offset_bytes) async for s in results] + + wrapped_fn = _convert_retry_deadline( + retry(execute_rpc), operation_timeout, transient_errors + ) + return await wrapped_fn() + + def mutations_batcher( + self, + *, + flush_interval: float | None = 5, + flush_limit_mutation_count: int | None = 1000, + flush_limit_bytes: int = 20 * _MB_SIZE, + flow_control_max_mutation_count: int = 100_000, + flow_control_max_bytes: int = 100 * _MB_SIZE, + batch_operation_timeout: float | None = None, + batch_per_request_timeout: float | None = None, + ) -> MutationsBatcherAsync: + """ + Returns a new mutations batcher instance. + + Can be used to iteratively add mutations that are flushed as a group, + to avoid excess network calls + + Args: + - flush_interval: Automatically flush every flush_interval seconds. If None, + a table default will be used + - flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count + mutations are added across all entries. If None, this limit is ignored. + - flush_limit_bytes: Flush immediately after flush_limit_bytes bytes are added. + - flow_control_max_mutation_count: Maximum number of inflight mutations. + - flow_control_max_bytes: Maximum number of inflight bytes. + - batch_operation_timeout: timeout for each mutate_rows operation, in seconds. If None, + table default_operation_timeout will be used + - batch_per_request_timeout: timeout for each individual request, in seconds.
If None, + table default_per_request_timeout will be used + Returns: + - a MutationsBatcherAsync context manager that can batch requests + """ + return MutationsBatcherAsync( + self, + flush_interval=flush_interval, + flush_limit_mutation_count=flush_limit_mutation_count, + flush_limit_bytes=flush_limit_bytes, + flow_control_max_mutation_count=flow_control_max_mutation_count, + flow_control_max_bytes=flow_control_max_bytes, + batch_operation_timeout=batch_operation_timeout, + batch_per_request_timeout=batch_per_request_timeout, + ) + + async def mutate_row( + self, + row_key: str | bytes, + mutations: list[Mutation] | Mutation, + *, + operation_timeout: float | None = 60, + per_request_timeout: float | None = None, + ): + """ + Mutates a row atomically. + + Cells already present in the row are left unchanged unless explicitly changed + by ``mutation``. + + Idempotent operations (i.e, all mutations have an explicit timestamp) will be + retried on server failure. Non-idempotent operations will not. + + Args: + - row_key: the row to apply mutations to + - mutations: the set of mutations to apply to the row + - operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + time is only counted while actively waiting on the network. + DeadlineExceeded exception raised after timeout + - per_request_timeout: the time budget for an individual network request, + in seconds. If it takes longer than this time to complete, the request + will be cancelled with a DeadlineExceeded exception, and a retry will be + attempted if within operation_timeout budget + + Raises: + - DeadlineExceeded: raised after operation timeout + will be chained with a RetryExceptionGroup containing all + GoogleAPIError exceptions from any retries that failed + - GoogleAPIError: raised on non-idempotent operations that cannot be + safely retried. 
+ """ + operation_timeout = operation_timeout or self.default_operation_timeout + per_request_timeout = per_request_timeout or self.default_per_request_timeout + + if operation_timeout <= 0: + raise ValueError("operation_timeout must be greater than 0") + if per_request_timeout is not None and per_request_timeout <= 0: + raise ValueError("per_request_timeout must be greater than 0") + if per_request_timeout is not None and per_request_timeout > operation_timeout: + raise ValueError("per_request_timeout must be less than operation_timeout") + + if isinstance(row_key, str): + row_key = row_key.encode("utf-8") + request = {"table_name": self.table_name, "row_key": row_key} + if self.app_profile_id: + request["app_profile_id"] = self.app_profile_id + + if isinstance(mutations, Mutation): + mutations = [mutations] + request["mutations"] = [mutation._to_dict() for mutation in mutations] + + if all(mutation.is_idempotent() for mutation in mutations): + # mutations are all idempotent and safe to retry + predicate = retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ) + else: + # mutations should not be retried + predicate = retries.if_exception_type() + + transient_errors = [] + + def on_error_fn(exc): + if predicate(exc): + transient_errors.append(exc) + + retry = retries.AsyncRetry( + predicate=predicate, + on_error=on_error_fn, + timeout=operation_timeout, + initial=0.01, + multiplier=2, + maximum=60, + ) + # wrap rpc in retry logic + retry_wrapped = retry(self.client._gapic_client.mutate_row) + # convert RetryErrors from retry wrapper into DeadlineExceeded errors + deadline_wrapped = _convert_retry_deadline( + retry_wrapped, operation_timeout, transient_errors + ) + metadata = _make_metadata(self.table_name, self.app_profile_id) + # trigger rpc + await deadline_wrapped(request, timeout=per_request_timeout, metadata=metadata) + + async def bulk_mutate_rows( + self, + mutation_entries: list[RowMutationEntry], + *, + operation_timeout: float | None = 60, + per_request_timeout: float | None = None, + ): + """ + Applies mutations for multiple rows in a single batched request. + + Each individual RowMutationEntry is applied atomically, but separate entries + may be applied in arbitrary order (even for entries targetting the same row) + In total, the row_mutations can contain at most 100000 individual mutations + across all entries + + Idempotent entries (i.e., entries with mutations with explicit timestamps) + will be retried on failure. Non-idempotent will not, and will reported in a + raised exception group + + Args: + - mutation_entries: the batches of mutations to apply + Each entry will be applied atomically, but entries will be applied + in arbitrary order + - operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + time is only counted while actively waiting on the network. + DeadlineExceeded exception raised after timeout + - per_request_timeout: the time budget for an individual network request, + in seconds. 
If it takes longer than this time to complete, the request + will be cancelled with a DeadlineExceeded exception, and a retry will + be attempted if within operation_timeout budget + Raises: + - MutationsExceptionGroup: if one or more mutations fail + Contains details about any failed entries in .exceptions + """ + operation_timeout = operation_timeout or self.default_operation_timeout + per_request_timeout = per_request_timeout or self.default_per_request_timeout + + if operation_timeout <= 0: + raise ValueError("operation_timeout must be greater than 0") + if per_request_timeout is not None and per_request_timeout <= 0: + raise ValueError("per_request_timeout must be greater than 0") + if per_request_timeout is not None and per_request_timeout > operation_timeout: + raise ValueError("per_request_timeout must be less than operation_timeout") + + operation = _MutateRowsOperationAsync( + self.client._gapic_client, + self, + mutation_entries, + operation_timeout, + per_request_timeout, + ) + await operation.start() + + async def check_and_mutate_row( + self, + row_key: str | bytes, + predicate: RowFilter | dict[str, Any] | None, + *, + true_case_mutations: Mutation | list[Mutation] | None = None, + false_case_mutations: Mutation | list[Mutation] | None = None, + operation_timeout: int | float | None = 20, + ) -> bool: + """ + Mutates a row atomically based on the output of a predicate filter + + Non-idempotent operation: will not be retried + + Args: + - row_key: the key of the row to mutate + - predicate: the filter to be applied to the contents of the specified row. + Depending on whether or not any results are yielded, + either true_case_mutations or false_case_mutations will be executed. + If None, checks that the row contains any values at all. + - true_case_mutations: + Changes to be atomically applied to the specified row if + predicate yields at least one cell when + applied to row_key. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + false_case_mutations is empty, and at most 100000. + - false_case_mutations: + Changes to be atomically applied to the specified row if + predicate_filter does not yield any cells when + applied to row_key. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + true_case_mutations is empty, and at most 100000. + - operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will not be retried.
+ Returns: + - bool indicating whether the predicate was true or false + Raises: + - GoogleAPIError exceptions from grpc call + """ + operation_timeout = operation_timeout or self.default_operation_timeout + if operation_timeout <= 0: + raise ValueError("operation_timeout must be greater than 0") + row_key = row_key.encode("utf-8") if isinstance(row_key, str) else row_key + if true_case_mutations is not None and not isinstance( + true_case_mutations, list + ): + true_case_mutations = [true_case_mutations] + true_case_dict = [m._to_dict() for m in true_case_mutations or []] + if false_case_mutations is not None and not isinstance( + false_case_mutations, list + ): + false_case_mutations = [false_case_mutations] + false_case_dict = [m._to_dict() for m in false_case_mutations or []] + if predicate is not None and not isinstance(predicate, dict): + predicate = predicate.to_dict() + metadata = _make_metadata(self.table_name, self.app_profile_id) + result = await self.client._gapic_client.check_and_mutate_row( + request={ + "predicate_filter": predicate, + "true_mutations": true_case_dict, + "false_mutations": false_case_dict, + "table_name": self.table_name, + "row_key": row_key, + "app_profile_id": self.app_profile_id, + }, + metadata=metadata, + timeout=operation_timeout, + ) + return result.predicate_matched + + async def read_modify_write_row( + self, + row_key: str | bytes, + rules: ReadModifyWriteRule | list[ReadModifyWriteRule], + *, + operation_timeout: int | float | None = 20, + ) -> Row: + """ + Reads and modifies a row atomically according to input ReadModifyWriteRules, + and returns the contents of all modified cells + + The new value for the timestamp is the greater of the existing timestamp or + the current server time. + + Non-idempotent operation: will not be retried + + Args: + - row_key: the key of the row to apply read/modify/write rules to + - rules: A rule or set of rules to apply to the row. + Rules are applied in order, meaning that earlier rules will affect the + results of later ones. + - operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will not be retried. + Returns: + - Row: containing cell data that was modified as part of the + operation + Raises: + - GoogleAPIError exceptions from grpc call + """ + operation_timeout = operation_timeout or self.default_operation_timeout + row_key = row_key.encode("utf-8") if isinstance(row_key, str) else row_key + if operation_timeout <= 0: + raise ValueError("operation_timeout must be greater than 0") + if rules is not None and not isinstance(rules, list): + rules = [rules] + if not rules: + raise ValueError("rules must contain at least one item") + # convert to dict representation + rules_dict = [rule._to_dict() for rule in rules] + metadata = _make_metadata(self.table_name, self.app_profile_id) + result = await self.client._gapic_client.read_modify_write_row( + request={ + "rules": rules_dict, + "table_name": self.table_name, + "row_key": row_key, + "app_profile_id": self.app_profile_id, + }, + metadata=metadata, + timeout=operation_timeout, + ) + # construct Row from result + return Row._from_pb(result.row) + + async def close(self): + """ + Called to close the Table instance and release any resources held by it.
+ """ + self._register_instance_task.cancel() + await self.client._remove_instance_registration(self.instance_id, self) + + async def __aenter__(self): + """ + Implement async context manager protocol + + Ensure registration task has time to run, so that + grpc channels will be warmed for the specified instance + """ + await self._register_instance_task + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + """ + Implement async context manager protocol + + Unregister this instance with the client, so that + grpc channels will no longer be warmed + """ + await self.close() diff --git a/google/cloud/bigtable/mutations_batcher.py b/google/cloud/bigtable/data/_async/mutations_batcher.py similarity index 96% rename from google/cloud/bigtable/mutations_batcher.py rename to google/cloud/bigtable/data/_async/mutations_batcher.py index 68c3f9fbe..25aafc2a1 100644 --- a/google/cloud/bigtable/mutations_batcher.py +++ b/google/cloud/bigtable/data/_async/mutations_batcher.py @@ -20,22 +20,24 @@ import warnings from collections import deque -from google.cloud.bigtable.mutations import RowMutationEntry -from google.cloud.bigtable.exceptions import MutationsExceptionGroup -from google.cloud.bigtable.exceptions import FailedMutationEntryError +from google.cloud.bigtable.data.mutations import RowMutationEntry +from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup +from google.cloud.bigtable.data.exceptions import FailedMutationEntryError -from google.cloud.bigtable._mutate_rows import _MutateRowsOperation -from google.cloud.bigtable._mutate_rows import MUTATE_ROWS_REQUEST_MUTATION_LIMIT -from google.cloud.bigtable.mutations import Mutation +from google.cloud.bigtable.data._async._mutate_rows import _MutateRowsOperationAsync +from google.cloud.bigtable.data._async._mutate_rows import ( + MUTATE_ROWS_REQUEST_MUTATION_LIMIT, +) +from google.cloud.bigtable.data.mutations import Mutation if TYPE_CHECKING: - from google.cloud.bigtable.client import Table # pragma: no cover + from google.cloud.bigtable.data._async.client import TableAsync # used to make more readable default values _MB_SIZE = 1024 * 1024 -class _FlowControl: +class _FlowControlAsync: """ Manages flow control for batched mutations. 
Mutations are registered against the FlowControl object before being sent, which will block if size or count @@ -159,7 +161,7 @@ async def add_to_flow(self, mutations: RowMutationEntry | list[RowMutationEntry] yield mutations[start_idx:end_idx] -class MutationsBatcher: +class MutationsBatcherAsync: """ Allows users to send batches using context manager API: @@ -179,7 +181,7 @@ class MutationsBatcher: def __init__( self, - table: "Table", + table: "TableAsync", *, flush_interval: float | None = 5, flush_limit_mutation_count: int | None = 1000, @@ -224,7 +226,7 @@ def __init__( self._table = table self._staged_entries: list[RowMutationEntry] = [] self._staged_count, self._staged_bytes = 0, 0 - self._flow_control = _FlowControl( + self._flow_control = _FlowControlAsync( flow_control_max_mutation_count, flow_control_max_bytes ) self._flush_limit_bytes = flush_limit_bytes @@ -354,7 +356,7 @@ async def _execute_mutate_rows( if self._table.app_profile_id: request["app_profile_id"] = self._table.app_profile_id try: - operation = _MutateRowsOperation( + operation = _MutateRowsOperationAsync( self._table.client._gapic_client, self._table, batch, diff --git a/google/cloud/bigtable/_helpers.py b/google/cloud/bigtable/data/_helpers.py similarity index 98% rename from google/cloud/bigtable/_helpers.py rename to google/cloud/bigtable/data/_helpers.py index 722fac9f4..64d91e108 100644 --- a/google/cloud/bigtable/_helpers.py +++ b/google/cloud/bigtable/data/_helpers.py @@ -18,7 +18,7 @@ import time from google.api_core import exceptions as core_exceptions -from google.cloud.bigtable.exceptions import RetryExceptionGroup +from google.cloud.bigtable.data.exceptions import RetryExceptionGroup """ Helper functions used in various places in the library. diff --git a/google/cloud/bigtable/_read_rows.py b/google/cloud/bigtable/data/_read_rows_state_machine.py similarity index 54% rename from google/cloud/bigtable/_read_rows.py rename to google/cloud/bigtable/data/_read_rows_state_machine.py index ee094f1a7..7c0d05fb9 100644 --- a/google/cloud/bigtable/_read_rows.py +++ b/google/cloud/bigtable/data/_read_rows_state_machine.py @@ -14,35 +14,14 @@ # from __future__ import annotations -from typing import ( - List, - Any, - AsyncIterable, - AsyncIterator, - AsyncGenerator, - Iterator, - Callable, - Awaitable, - Type, -) - -import asyncio -from functools import partial -from grpc.aio import RpcContext +from typing import Type from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse -from google.cloud.bigtable_v2.services.bigtable.async_client import BigtableAsyncClient -from google.cloud.bigtable.row import Row, Cell, _LastScannedRow -from google.cloud.bigtable.exceptions import InvalidChunk -from google.cloud.bigtable.exceptions import _RowSetComplete -from google.api_core import retry_async as retries -from google.api_core import exceptions as core_exceptions -from google.cloud.bigtable._helpers import _make_metadata -from google.cloud.bigtable._helpers import _attempt_timeout_generator +from google.cloud.bigtable.data.row import Row, Cell, _LastScannedRow +from google.cloud.bigtable.data.exceptions import InvalidChunk """ -This module provides a set of classes for merging ReadRowsResponse chunks -into Row objects. 
+This module provides classes for the read_rows state machine: - ReadRowsOperation is the highest level class, providing an interface for asynchronous merging end-to-end @@ -56,253 +35,6 @@ """ -class _ReadRowsOperation(AsyncIterable[Row]): - """ - ReadRowsOperation handles the logic of merging chunks from a ReadRowsResponse stream - into a stream of Row objects. - - ReadRowsOperation.merge_row_response_stream takes in a stream of ReadRowsResponse - and turns them into a stream of Row objects using an internal - StateMachine. - - ReadRowsOperation(request, client) handles row merging logic end-to-end, including - performing retries on stream errors. - """ - - def __init__( - self, - request: dict[str, Any], - client: BigtableAsyncClient, - *, - operation_timeout: float = 600.0, - per_request_timeout: float | None = None, - ): - """ - Args: - - request: the request dict to send to the Bigtable API - - client: the Bigtable client to use to make the request - - operation_timeout: the timeout to use for the entire operation, in seconds - - per_request_timeout: the timeout to use when waiting for each individual grpc request, in seconds - If not specified, defaults to operation_timeout - """ - self._last_emitted_row_key: bytes | None = None - self._emit_count = 0 - self._request = request - self.operation_timeout = operation_timeout - # use generator to lower per-attempt timeout as we approach operation_timeout deadline - attempt_timeout_gen = _attempt_timeout_generator( - per_request_timeout, operation_timeout - ) - row_limit = request.get("rows_limit", 0) - # lock in paramters for retryable wrapper - self._partial_retryable = partial( - self._read_rows_retryable_attempt, - client.read_rows, - attempt_timeout_gen, - row_limit, - ) - predicate = retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - core_exceptions.Aborted, - ) - - def on_error_fn(exc): - if predicate(exc): - self.transient_errors.append(exc) - - retry = retries.AsyncRetry( - predicate=predicate, - timeout=self.operation_timeout, - initial=0.01, - multiplier=2, - maximum=60, - on_error=on_error_fn, - is_stream=True, - ) - self._stream: AsyncGenerator[Row, None] | None = retry( - self._partial_retryable - )() - # contains the list of errors that were retried - self.transient_errors: List[Exception] = [] - - def __aiter__(self) -> AsyncIterator[Row]: - """Implements the AsyncIterable interface""" - return self - - async def __anext__(self) -> Row: - """Implements the AsyncIterator interface""" - if self._stream is not None: - return await self._stream.__anext__() - else: - raise asyncio.InvalidStateError("stream is closed") - - async def aclose(self): - """Close the stream and release resources""" - if self._stream is not None: - await self._stream.aclose() - self._stream = None - self._emitted_seen_row_key = None - - async def _read_rows_retryable_attempt( - self, - gapic_fn: Callable[..., Awaitable[AsyncIterable[ReadRowsResponse]]], - timeout_generator: Iterator[float], - total_row_limit: int, - ) -> AsyncGenerator[Row, None]: - """ - Retryable wrapper for merge_rows. This function is called each time - a retry is attempted. 
- - Some fresh state is created on each retry: - - grpc network stream - - state machine to hold merge chunks received from stream - Some state is shared between retries: - - _last_emitted_row_key is used to ensure that - duplicate rows are not emitted - - request is stored and (potentially) modified on each retry - """ - if self._last_emitted_row_key is not None: - # if this is a retry, try to trim down the request to avoid ones we've already processed - try: - self._request["rows"] = _ReadRowsOperation._revise_request_rowset( - row_set=self._request.get("rows", None), - last_seen_row_key=self._last_emitted_row_key, - ) - except _RowSetComplete: - # if there are no rows left to process, we're done - # This is not expected to happen often, but could occur if - # a retry is triggered quickly after the last row is emitted - return - # revise next request's row limit based on number emitted - if total_row_limit: - new_limit = total_row_limit - self._emit_count - if new_limit == 0: - # we have hit the row limit, so we're done - return - elif new_limit < 0: - raise RuntimeError("unexpected state: emit count exceeds row limit") - else: - self._request["rows_limit"] = new_limit - metadata = _make_metadata( - self._request.get("table_name", None), - self._request.get("app_profile_id", None), - ) - new_gapic_stream: RpcContext = await gapic_fn( - self._request, - timeout=next(timeout_generator), - metadata=metadata, - ) - try: - state_machine = _StateMachine() - stream = _ReadRowsOperation.merge_row_response_stream( - new_gapic_stream, state_machine - ) - # run until we get a timeout or the stream is exhausted - async for new_item in stream: - if ( - self._last_emitted_row_key is not None - and new_item.row_key <= self._last_emitted_row_key - ): - raise InvalidChunk("Last emitted row key out of order") - # don't yeild _LastScannedRow markers; they - # should only update last_seen_row_key - if not isinstance(new_item, _LastScannedRow): - yield new_item - self._emit_count += 1 - self._last_emitted_row_key = new_item.row_key - if total_row_limit and self._emit_count >= total_row_limit: - return - except (Exception, GeneratorExit) as exc: - # ensure grpc stream is closed - new_gapic_stream.cancel() - raise exc - - @staticmethod - def _revise_request_rowset( - row_set: dict[str, Any] | None, - last_seen_row_key: bytes, - ) -> dict[str, Any]: - """ - Revise the rows in the request to avoid ones we've already processed. 
- - Args: - - row_set: the row set from the request - - last_seen_row_key: the last row key encountered - Raises: - - _RowSetComplete: if there are no rows left to process after the revision - """ - # if user is doing a whole table scan, start a new one with the last seen key - if row_set is None or ( - len(row_set.get("row_ranges", [])) == 0 - and len(row_set.get("row_keys", [])) == 0 - ): - last_seen = last_seen_row_key - return { - "row_keys": [], - "row_ranges": [{"start_key_open": last_seen}], - } - # remove seen keys from user-specific key list - row_keys: list[bytes] = row_set.get("row_keys", []) - adjusted_keys = [k for k in row_keys if k > last_seen_row_key] - # adjust ranges to ignore keys before last seen - row_ranges: list[dict[str, Any]] = row_set.get("row_ranges", []) - adjusted_ranges = [] - for row_range in row_ranges: - end_key = row_range.get("end_key_closed", None) or row_range.get( - "end_key_open", None - ) - if end_key is None or end_key > last_seen_row_key: - # end range is after last seen key - new_range = row_range.copy() - start_key = row_range.get("start_key_closed", None) or row_range.get( - "start_key_open", None - ) - if start_key is None or start_key <= last_seen_row_key: - # replace start key with last seen - new_range["start_key_open"] = last_seen_row_key - new_range.pop("start_key_closed", None) - adjusted_ranges.append(new_range) - if len(adjusted_keys) == 0 and len(adjusted_ranges) == 0: - # if the query is empty after revision, raise an exception - # this will avoid an unwanted full table scan - raise _RowSetComplete() - return {"row_keys": adjusted_keys, "row_ranges": adjusted_ranges} - - @staticmethod - async def merge_row_response_stream( - response_generator: AsyncIterable[ReadRowsResponse], - state_machine: _StateMachine, - ) -> AsyncGenerator[Row, None]: - """ - Consume chunks from a ReadRowsResponse stream into a set of Rows - - Args: - - response_generator: AsyncIterable of ReadRowsResponse objects. Typically - this is a stream of chunks from the Bigtable API - Returns: - - AsyncGenerator of Rows - Raises: - - InvalidChunk: if the chunk stream is invalid - """ - async for row_response in response_generator: - # unwrap protoplus object for increased performance - response_pb = row_response._pb - last_scanned = response_pb.last_scanned_row_key - # if the server sends a scan heartbeat, notify the state machine. - if last_scanned: - yield state_machine.handle_last_scanned_row(last_scanned) - # process new chunks through the state machine. - for chunk in response_pb.chunks: - complete_row = state_machine.handle_chunk(chunk) - if complete_row is not None: - yield complete_row - # TODO: handle request stats - if not state_machine.is_terminal_state(): - # read rows is complete, but there's still data in the merger - raise InvalidChunk("read_rows completed with partial state remaining") - - class _StateMachine: """ State Machine converts chunks into Rows @@ -579,7 +311,7 @@ def reset(self) -> None: self.current_key: bytes | None = None self.working_cell: Cell | None = None self.working_value: bytearray | None = None - self.completed_cells: List[Cell] = [] + self.completed_cells: list[Cell] = [] def start_row(self, key: bytes) -> None: """Called to start a new row. 
This will be called once per row""" @@ -590,7 +322,7 @@ def start_cell( family: str, qualifier: bytes, timestamp_micros: int, - labels: List[str], + labels: list[str], ) -> None: """called to start a new cell in a row.""" if self.current_key is None: diff --git a/google/cloud/bigtable/exceptions.py b/google/cloud/bigtable/data/exceptions.py similarity index 93% rename from google/cloud/bigtable/exceptions.py rename to google/cloud/bigtable/data/exceptions.py index fc4e368b9..9b6b4fe3f 100644 --- a/google/cloud/bigtable/exceptions.py +++ b/google/cloud/bigtable/data/exceptions.py @@ -19,13 +19,13 @@ from typing import Any, TYPE_CHECKING from google.api_core import exceptions as core_exceptions -from google.cloud.bigtable.row import Row +from google.cloud.bigtable.data.row import Row is_311_plus = sys.version_info >= (3, 11) if TYPE_CHECKING: - from google.cloud.bigtable.mutations import RowMutationEntry - from google.cloud.bigtable.read_rows_query import ReadRowsQuery + from google.cloud.bigtable.data.mutations import RowMutationEntry + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery class IdleTimeout(core_exceptions.DeadlineExceeded): @@ -50,7 +50,15 @@ class _RowSetComplete(Exception): pass -class BigtableExceptionGroup(ExceptionGroup if is_311_plus else Exception): # type: ignore # noqa: F821 +class _MutateRowsIncomplete(RuntimeError): + """ + Exception raised when a mutate_rows call has unfinished work. + """ + + pass + + +class _BigtableExceptionGroup(ExceptionGroup if is_311_plus else Exception): # type: ignore # noqa: F821 """ Represents one or more exceptions that occur during a bulk Bigtable operation @@ -82,7 +90,7 @@ def __str__(self): return self.args[0] -class MutationsExceptionGroup(BigtableExceptionGroup): +class MutationsExceptionGroup(_BigtableExceptionGroup): """ Represents one or more exceptions that occur during a bulk mutation operation @@ -202,7 +210,7 @@ def __init__( self.__cause__ = cause -class RetryExceptionGroup(BigtableExceptionGroup): +class RetryExceptionGroup(_BigtableExceptionGroup): """Represents one or more exceptions that occur during a retryable operation""" @staticmethod @@ -221,7 +229,7 @@ def __new__(cls, excs: list[Exception]): return super().__new__(cls, cls._format_message(excs), excs) -class ShardedReadRowsExceptionGroup(BigtableExceptionGroup): +class ShardedReadRowsExceptionGroup(_BigtableExceptionGroup): """ Represents one or more exceptions that occur during a sharded read rows operation """ diff --git a/google/cloud/bigtable/mutations.py b/google/cloud/bigtable/data/mutations.py similarity index 98% rename from google/cloud/bigtable/mutations.py rename to google/cloud/bigtable/data/mutations.py index a4c02cd74..de1b3b137 100644 --- a/google/cloud/bigtable/mutations.py +++ b/google/cloud/bigtable/data/mutations.py @@ -19,16 +19,16 @@ from abc import ABC, abstractmethod from sys import getsizeof -# mutation entries above this should be rejected -from google.cloud.bigtable._mutate_rows import MUTATE_ROWS_REQUEST_MUTATION_LIMIT - - -from google.cloud.bigtable.read_modify_write_rules import MAX_INCREMENT_VALUE +from google.cloud.bigtable.data.read_modify_write_rules import MAX_INCREMENT_VALUE # special value for SetCell mutation timestamps. 
If set, server will assign a timestamp SERVER_SIDE_TIMESTAMP = -1 +# mutation entries above this should be rejected +MUTATE_ROWS_REQUEST_MUTATION_LIMIT = 100_000 + + class Mutation(ABC): """Model class for mutations""" diff --git a/google/cloud/bigtable/read_modify_write_rules.py b/google/cloud/bigtable/data/read_modify_write_rules.py similarity index 100% rename from google/cloud/bigtable/read_modify_write_rules.py rename to google/cloud/bigtable/data/read_modify_write_rules.py diff --git a/google/cloud/bigtable/read_rows_query.py b/google/cloud/bigtable/data/read_rows_query.py similarity index 98% rename from google/cloud/bigtable/read_rows_query.py rename to google/cloud/bigtable/data/read_rows_query.py index eb28eeda3..7d7e1f99f 100644 --- a/google/cloud/bigtable/read_rows_query.py +++ b/google/cloud/bigtable/data/read_rows_query.py @@ -18,11 +18,11 @@ from bisect import bisect_right from collections import defaultdict from dataclasses import dataclass -from google.cloud.bigtable.row_filters import RowFilter +from google.cloud.bigtable.data.row_filters import RowFilter if TYPE_CHECKING: - from google.cloud.bigtable import RowKeySamples - from google.cloud.bigtable import ShardedQuery + from google.cloud.bigtable.data import RowKeySamples + from google.cloud.bigtable.data import ShardedQuery @dataclass diff --git a/google/cloud/bigtable/data/row.py b/google/cloud/bigtable/data/row.py new file mode 100644 index 000000000..5fdc1b365 --- /dev/null +++ b/google/cloud/bigtable/data/row.py @@ -0,0 +1,465 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from collections import OrderedDict +from typing import Sequence, Generator, overload, Any +from functools import total_ordering + +from google.cloud.bigtable_v2.types import Row as RowPB + +# Type aliases used internally for readability. +_family_type = str +_qualifier_type = bytes + + +class Row(Sequence["Cell"]): + """ + Model class for row data returned from server + + Does not represent all data contained in the row, only data returned by a + query. + Expected to be read-only to users, and written by backend + + Can be indexed: + cells = row["family", "qualifier"] + """ + + __slots__ = ("row_key", "cells", "_index_data") + + def __init__( + self, + key: bytes, + cells: list[Cell], + ): + """ + Initializes a Row object + + Row objects are not intended to be created by users. + They are returned by the Bigtable backend. + """ + self.row_key = key + self.cells: list[Cell] = cells + # index is lazily created when needed + self._index_data: OrderedDict[ + _family_type, OrderedDict[_qualifier_type, list[Cell]] + ] | None = None + + @property + def _index( + self, + ) -> OrderedDict[_family_type, OrderedDict[_qualifier_type, list[Cell]]]: + """ + Returns an index of cells associated with each family and qualifier. 
+ + The index is lazily created when needed + """ + if self._index_data is None: + self._index_data = OrderedDict() + for cell in self.cells: + self._index_data.setdefault(cell.family, OrderedDict()).setdefault( + cell.qualifier, [] + ).append(cell) + return self._index_data + + @classmethod + def _from_pb(cls, row_pb: RowPB) -> Row: + """ + Creates a row from a protobuf representation + + Row objects are not intended to be created by users. + They are returned by the Bigtable backend. + """ + row_key: bytes = row_pb.key + cell_list: list[Cell] = [] + for family in row_pb.families: + for column in family.columns: + for cell in column.cells: + new_cell = Cell( + value=cell.value, + row_key=row_key, + family=family.name, + qualifier=column.qualifier, + timestamp_micros=cell.timestamp_micros, + labels=list(cell.labels) if cell.labels else None, + ) + cell_list.append(new_cell) + return cls(row_key, cells=cell_list) + + def get_cells( + self, family: str | None = None, qualifier: str | bytes | None = None + ) -> list[Cell]: + """ + Returns cells sorted in Bigtable native order: + - Family lexicographically ascending + - Qualifier ascending + - Timestamp in reverse chronological order + + If family or qualifier not passed, will include all + + Can also be accessed through indexing: + cells = row["family", "qualifier"] + cells = row["family"] + """ + if family is None: + if qualifier is not None: + # get_cells(None, "qualifier") is not allowed + raise ValueError("Qualifier passed without family") + else: + # return all cells on get_cells() + return self.cells + if qualifier is None: + # return all cells in family on get_cells(family) + return list(self._get_all_from_family(family)) + if isinstance(qualifier, str): + qualifier = qualifier.encode("utf-8") + # return cells in family and qualifier on get_cells(family, qualifier) + if family not in self._index: + raise ValueError(f"Family '{family}' not found in row '{self.row_key!r}'") + if qualifier not in self._index[family]: + raise ValueError( + f"Qualifier '{qualifier!r}' not found in family '{family}' in row '{self.row_key!r}'" + ) + return self._index[family][qualifier] + + def _get_all_from_family(self, family: str) -> Generator[Cell, None, None]: + """ + Returns all cells in the row for the family_id + """ + if family not in self._index: + raise ValueError(f"Family '{family}' not found in row '{self.row_key!r}'") + for qualifier in self._index[family]: + yield from self._index[family][qualifier] + + def __str__(self) -> str: + """ + Human-readable string representation + + { + (family='fam', qualifier=b'col'): [b'value', (+1 more),], + (family='fam', qualifier=b'col2'): [b'other'], + } + """ + output = ["{"] + for family, qualifier in self.get_column_components(): + cell_list = self[family, qualifier] + line = [f" (family={family!r}, qualifier={qualifier!r}): "] + if len(cell_list) == 0: + line.append("[],") + elif len(cell_list) == 1: + line.append(f"[{cell_list[0]}],") + else: + line.append(f"[{cell_list[0]}, (+{len(cell_list)-1} more)],") + output.append("".join(line)) + output.append("}") + return "\n".join(output) + + def __repr__(self): + cell_str_buffer = ["{"] + for family, qualifier in self.get_column_components(): + cell_list = self[family, qualifier] + repr_list = [cell.to_dict() for cell in cell_list] + cell_str_buffer.append(f" ('{family}', {qualifier!r}): {repr_list},") + cell_str_buffer.append("}") + cell_str = "\n".join(cell_str_buffer) + output = f"Row(key={self.row_key!r}, cells={cell_str})" + return output + + def 
to_dict(self) -> dict[str, Any]: + """ + Returns a dictionary representation of the cell in the Bigtable Row + proto format + + https://cloud.google.com/bigtable/docs/reference/data/rpc/google.bigtable.v2#row + """ + family_list = [] + for family_name, qualifier_dict in self._index.items(): + qualifier_list = [] + for qualifier_name, cell_list in qualifier_dict.items(): + cell_dicts = [cell.to_dict() for cell in cell_list] + qualifier_list.append( + {"qualifier": qualifier_name, "cells": cell_dicts} + ) + family_list.append({"name": family_name, "columns": qualifier_list}) + return {"key": self.row_key, "families": family_list} + + # Sequence and Mapping methods + def __iter__(self): + """ + Allow iterating over all cells in the row + """ + return iter(self.cells) + + def __contains__(self, item): + """ + Implements `in` operator + + Works for both cells in the internal list, and `family` or + `(family, qualifier)` pairs associated with the cells + """ + if isinstance(item, _family_type): + return item in self._index + elif ( + isinstance(item, tuple) + and isinstance(item[0], _family_type) + and isinstance(item[1], (bytes, str)) + ): + q = item[1] if isinstance(item[1], bytes) else item[1].encode("utf-8") + return item[0] in self._index and q in self._index[item[0]] + # check if Cell is in Row + return item in self.cells + + @overload + def __getitem__( + self, + index: str | tuple[str, bytes | str], + ) -> list[Cell]: + # overload signature for type checking + pass + + @overload + def __getitem__(self, index: int) -> Cell: + # overload signature for type checking + pass + + @overload + def __getitem__(self, index: slice) -> list[Cell]: + # overload signature for type checking + pass + + def __getitem__(self, index): + """ + Implements [] indexing + + Supports indexing by family, (family, qualifier) pair, + numerical index, and index slicing + """ + if isinstance(index, _family_type): + return self.get_cells(family=index) + elif ( + isinstance(index, tuple) + and isinstance(index[0], _family_type) + and isinstance(index[1], (bytes, str)) + ): + return self.get_cells(family=index[0], qualifier=index[1]) + elif isinstance(index, int) or isinstance(index, slice): + # index is int or slice + return self.cells[index] + else: + raise TypeError( + "Index must be family_id, (family_id, qualifier), int, or slice" + ) + + def __len__(self): + """ + Implements `len()` operator + """ + return len(self.cells) + + def get_column_components(self) -> list[tuple[str, bytes]]: + """ + Returns a list of (family, qualifier) pairs associated with the cells + + Pairs can be used for indexing + """ + return [(f, q) for f in self._index for q in self._index[f]] + + def __eq__(self, other): + """ + Implements `==` operator + """ + # for performance reasons, check row metadata + # before checking individual cells + if not isinstance(other, Row): + return False + if self.row_key != other.row_key: + return False + if len(self.cells) != len(other.cells): + return False + components = self.get_column_components() + other_components = other.get_column_components() + if len(components) != len(other_components): + return False + if components != other_components: + return False + for family, qualifier in components: + if len(self[family, qualifier]) != len(other[family, qualifier]): + return False + # compare individual cell lists + if self.cells != other.cells: + return False + return True + + def __ne__(self, other) -> bool: + """ + Implements `!=` operator + """ + return not self == other + + +class 
_LastScannedRow(Row): + """A value used to indicate a scanned row that is not returned as part of + a query. + + This is used internally to indicate progress in a scan, and improve retry + performance. It is not intended to be used directly by users. + """ + + def __init__(self, row_key): + super().__init__(row_key, []) + + def __eq__(self, other): + return isinstance(other, _LastScannedRow) + + +@total_ordering +class Cell: + """ + Model class for cell data + + Does not represent all data contained in the cell, only data returned by a + query. + Expected to be read-only to users, and written by backend + """ + + __slots__ = ( + "value", + "row_key", + "family", + "qualifier", + "timestamp_micros", + "labels", + ) + + def __init__( + self, + value: bytes, + row_key: bytes, + family: str, + qualifier: bytes | str, + timestamp_micros: int, + labels: list[str] | None = None, + ): + """ + Cell constructor + + Cell objects are not intended to be constructed by users. + They are returned by the Bigtable backend. + """ + self.value = value + self.row_key = row_key + self.family = family + if isinstance(qualifier, str): + qualifier = qualifier.encode() + self.qualifier = qualifier + self.timestamp_micros = timestamp_micros + self.labels = labels if labels is not None else [] + + def __int__(self) -> int: + """ + Allows casting cell to int + Interprets value as a 64-bit big-endian signed integer, as expected by + ReadModifyWrite increment rule + """ + return int.from_bytes(self.value, byteorder="big", signed=True) + + def to_dict(self) -> dict[str, Any]: + """ + Returns a dictionary representation of the cell in the Bigtable Cell + proto format + + https://cloud.google.com/bigtable/docs/reference/data/rpc/google.bigtable.v2#cell + """ + cell_dict: dict[str, Any] = { + "value": self.value, + } + cell_dict["timestamp_micros"] = self.timestamp_micros + if self.labels: + cell_dict["labels"] = self.labels + return cell_dict + + def __str__(self) -> str: + """ + Allows casting cell to str + Prints encoded byte string, same as printing value directly. 
+ """ + return str(self.value) + + def __repr__(self): + """ + Returns a string representation of the cell + """ + return f"Cell(value={self.value!r}, row_key={self.row_key!r}, family='{self.family}', qualifier={self.qualifier!r}, timestamp_micros={self.timestamp_micros}, labels={self.labels})" + + """For Bigtable native ordering""" + + def __lt__(self, other) -> bool: + """ + Implements `<` operator + """ + if not isinstance(other, Cell): + return NotImplemented + this_ordering = ( + self.family, + self.qualifier, + -self.timestamp_micros, + self.value, + self.labels, + ) + other_ordering = ( + other.family, + other.qualifier, + -other.timestamp_micros, + other.value, + other.labels, + ) + return this_ordering < other_ordering + + def __eq__(self, other) -> bool: + """ + Implements `==` operator + """ + if not isinstance(other, Cell): + return NotImplemented + return ( + self.row_key == other.row_key + and self.family == other.family + and self.qualifier == other.qualifier + and self.value == other.value + and self.timestamp_micros == other.timestamp_micros + and len(self.labels) == len(other.labels) + and all([label in other.labels for label in self.labels]) + ) + + def __ne__(self, other) -> bool: + """ + Implements `!=` operator + """ + return not self == other + + def __hash__(self): + """ + Implements `hash()` function to fingerprint cell + """ + return hash( + ( + self.row_key, + self.family, + self.qualifier, + self.value, + self.timestamp_micros, + tuple(self.labels), + ) + ) diff --git a/google/cloud/bigtable/deprecated/row_filters.py b/google/cloud/bigtable/data/row_filters.py similarity index 58% rename from google/cloud/bigtable/deprecated/row_filters.py rename to google/cloud/bigtable/data/row_filters.py index 53192acc8..b2fae6971 100644 --- a/google/cloud/bigtable/deprecated/row_filters.py +++ b/google/cloud/bigtable/data/row_filters.py @@ -13,18 +13,25 @@ # limitations under the License. """Filters for Google Cloud Bigtable Row classes.""" +from __future__ import annotations import struct +from typing import Any, Sequence, TYPE_CHECKING, overload +from abc import ABC, abstractmethod from google.cloud._helpers import _microseconds_from_datetime # type: ignore from google.cloud._helpers import _to_bytes # type: ignore from google.cloud.bigtable_v2.types import data as data_v2_pb2 +if TYPE_CHECKING: + # import dependencies when type checking + from datetime import datetime + _PACK_I64 = struct.Struct(">q").pack -class RowFilter(object): +class RowFilter(ABC): """Basic filter to apply to cells in a row. These values can be combined via :class:`RowFilterChain`, @@ -35,15 +42,30 @@ class RowFilter(object): This class is a do-nothing base class for all row filters. """ + def _to_pb(self) -> data_v2_pb2.RowFilter: + """Converts the row filter to a protobuf. + + Returns: The converted current object. + """ + return data_v2_pb2.RowFilter(**self.to_dict()) + + @abstractmethod + def to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + pass + + def __repr__(self) -> str: + return f"{self.__class__.__name__}()" + -class _BoolFilter(RowFilter): +class _BoolFilter(RowFilter, ABC): """Row filter that uses a boolean flag. :type flag: bool :param flag: An indicator if a setting is turned on or off. 
""" - def __init__(self, flag): + def __init__(self, flag: bool): self.flag = flag def __eq__(self, other): @@ -54,6 +76,9 @@ def __eq__(self, other): def __ne__(self, other): return not self == other + def __repr__(self) -> str: + return f"{self.__class__.__name__}(flag={self.flag})" + class SinkFilter(_BoolFilter): """Advanced row filter to skip parent filters. @@ -66,13 +91,9 @@ class SinkFilter(_BoolFilter): of a :class:`ConditionalRowFilter`. """ - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(sink=self.flag) + def to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"sink": self.flag} class PassAllFilter(_BoolFilter): @@ -84,13 +105,9 @@ class PassAllFilter(_BoolFilter): completeness. """ - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(pass_all_filter=self.flag) + def to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"pass_all_filter": self.flag} class BlockAllFilter(_BoolFilter): @@ -101,16 +118,12 @@ class BlockAllFilter(_BoolFilter): temporarily disabling just part of a filter. """ - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(block_all_filter=self.flag) + def to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"block_all_filter": self.flag} -class _RegexFilter(RowFilter): +class _RegexFilter(RowFilter, ABC): """Row filter that uses a regular expression. The ``regex`` must be valid RE2 patterns. See Google's @@ -124,8 +137,8 @@ class _RegexFilter(RowFilter): will be encoded as ASCII. """ - def __init__(self, regex): - self.regex = _to_bytes(regex) + def __init__(self, regex: str | bytes): + self.regex: bytes = _to_bytes(regex) def __eq__(self, other): if not isinstance(other, self.__class__): @@ -135,6 +148,9 @@ def __eq__(self, other): def __ne__(self, other): return not self == other + def __repr__(self) -> str: + return f"{self.__class__.__name__}(regex={self.regex!r})" + class RowKeyRegexFilter(_RegexFilter): """Row filter for a row key regular expression. @@ -159,13 +175,9 @@ class RowKeyRegexFilter(_RegexFilter): since the row key is already specified. """ - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(row_key_regex_filter=self.regex) + def to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"row_key_regex_filter": self.regex} class RowSampleFilter(RowFilter): @@ -176,8 +188,8 @@ class RowSampleFilter(RowFilter): interval ``(0, 1)`` The end points are excluded). """ - def __init__(self, sample): - self.sample = sample + def __init__(self, sample: float): + self.sample: float = sample def __eq__(self, other): if not isinstance(other, self.__class__): @@ -187,13 +199,12 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def to_pb(self): - """Converts the row filter to a protobuf. 
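A rough sketch of the new dict-based conversion path using an illustrative pattern, assuming the filters are imported from google.cloud.bigtable.data.row_filters; each concrete filter emits a plain dict, and the shared base class builds the protobuf from that dict:

    >>> from google.cloud.bigtable.data.row_filters import RowKeyRegexFilter
    >>> f = RowKeyRegexFilter("abc.*")
    >>> f.to_dict()
    {'row_key_regex_filter': b'abc.*'}
    >>> pb = f._to_pb()   # equivalent to data_v2_pb2.RowFilter(**f.to_dict())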
+ def to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"row_sample_filter": self.sample} - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(row_sample_filter=self.sample) + def __repr__(self) -> str: + return f"{self.__class__.__name__}(sample={self.sample})" class FamilyNameRegexFilter(_RegexFilter): @@ -211,13 +222,9 @@ class FamilyNameRegexFilter(_RegexFilter): used as a literal. """ - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(family_name_regex_filter=self.regex) + def to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"family_name_regex_filter": self.regex} class ColumnQualifierRegexFilter(_RegexFilter): @@ -241,13 +248,9 @@ class ColumnQualifierRegexFilter(_RegexFilter): match this regex (irrespective of column family). """ - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(column_qualifier_regex_filter=self.regex) + def to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"column_qualifier_regex_filter": self.regex} class TimestampRange(object): @@ -262,9 +265,9 @@ class TimestampRange(object): range. If omitted, no upper bound is used. """ - def __init__(self, start=None, end=None): - self.start = start - self.end = end + def __init__(self, start: "datetime" | None = None, end: "datetime" | None = None): + self.start: "datetime" | None = start + self.end: "datetime" | None = end def __eq__(self, other): if not isinstance(other, self.__class__): @@ -274,23 +277,29 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def to_pb(self): + def _to_pb(self) -> data_v2_pb2.TimestampRange: """Converts the :class:`TimestampRange` to a protobuf. - :rtype: :class:`.data_v2_pb2.TimestampRange` - :returns: The converted current object. + Returns: The converted current object. """ + return data_v2_pb2.TimestampRange(**self.to_dict()) + + def to_dict(self) -> dict[str, int]: + """Converts the timestamp range to a dict representation.""" timestamp_range_kwargs = {} if self.start is not None: - timestamp_range_kwargs["start_timestamp_micros"] = ( - _microseconds_from_datetime(self.start) // 1000 * 1000 - ) + start_time = _microseconds_from_datetime(self.start) // 1000 * 1000 + timestamp_range_kwargs["start_timestamp_micros"] = start_time if self.end is not None: end_time = _microseconds_from_datetime(self.end) if end_time % 1000 != 0: + # if not a whole milisecond value, round up end_time = end_time // 1000 * 1000 + 1000 timestamp_range_kwargs["end_timestamp_micros"] = end_time - return data_v2_pb2.TimestampRange(**timestamp_range_kwargs) + return timestamp_range_kwargs + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(start={self.start}, end={self.end})" class TimestampRangeFilter(RowFilter): @@ -300,8 +309,8 @@ class TimestampRangeFilter(RowFilter): :param range_: Range of time that cells should match against. 
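A small worked example of the millisecond-granularity rule above, with an illustrative microsecond value; start timestamps are truncated down to a whole millisecond, while an end timestamp that falls mid-millisecond is rounded up:

    >>> end_micros = 1_672_531_200_001_500   # 1.5 ms past a whole second
    >>> end_micros % 1000 != 0               # not on a millisecond boundary...
    True
    >>> end_micros // 1000 * 1000 + 1000     # ...so it is rounded up to the next one
    1672531200002000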
""" - def __init__(self, range_): - self.range_ = range_ + def __init__(self, start: "datetime" | None = None, end: "datetime" | None = None): + self.range_: TimestampRange = TimestampRange(start, end) def __eq__(self, other): if not isinstance(other, self.__class__): @@ -311,16 +320,22 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def to_pb(self): + def _to_pb(self) -> data_v2_pb2.RowFilter: """Converts the row filter to a protobuf. First converts the ``range_`` on the current object to a protobuf and then uses it in the ``timestamp_range_filter`` field. - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. + Returns: The converted current object. """ - return data_v2_pb2.RowFilter(timestamp_range_filter=self.range_.to_pb()) + return data_v2_pb2.RowFilter(timestamp_range_filter=self.range_._to_pb()) + + def to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"timestamp_range_filter": self.range_.to_dict()} + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(start={self.range_.start!r}, end={self.range_.end!r})" class ColumnRangeFilter(RowFilter): @@ -330,71 +345,72 @@ class ColumnRangeFilter(RowFilter): By default, we include them both, but this can be changed with optional flags. - :type column_family_id: str - :param column_family_id: The column family that contains the columns. Must + :type family_id: str + :param family_id: The column family that contains the columns. Must be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - :type start_column: bytes - :param start_column: The start of the range of columns. If no value is + :type start_qualifier: bytes + :param start_qualifier: The start of the range of columns. If no value is used, the backend applies no upper bound to the values. - :type end_column: bytes - :param end_column: The end of the range of columns. If no value is used, + :type end_qualifier: bytes + :param end_qualifier: The end of the range of columns. If no value is used, the backend applies no upper bound to the values. :type inclusive_start: bool :param inclusive_start: Boolean indicating if the start column should be included in the range (or excluded). Defaults - to :data:`True` if ``start_column`` is passed and + to :data:`True` if ``start_qualifier`` is passed and no ``inclusive_start`` was given. :type inclusive_end: bool :param inclusive_end: Boolean indicating if the end column should be included in the range (or excluded). Defaults - to :data:`True` if ``end_column`` is passed and + to :data:`True` if ``end_qualifier`` is passed and no ``inclusive_end`` was given. :raises: :class:`ValueError ` if ``inclusive_start`` - is set but no ``start_column`` is given or if ``inclusive_end`` - is set but no ``end_column`` is given + is set but no ``start_qualifier`` is given or if ``inclusive_end`` + is set but no ``end_qualifier`` is given """ def __init__( self, - column_family_id, - start_column=None, - end_column=None, - inclusive_start=None, - inclusive_end=None, + family_id: str, + start_qualifier: bytes | None = None, + end_qualifier: bytes | None = None, + inclusive_start: bool | None = None, + inclusive_end: bool | None = None, ): - self.column_family_id = column_family_id - if inclusive_start is None: inclusive_start = True - elif start_column is None: + elif start_qualifier is None: raise ValueError( - "Inclusive start was specified but no " "start column was given." + "inclusive_start was specified but no start_qualifier was given." 
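A hedged sketch of the reworked TimestampRangeFilter, which now takes start/end datetimes directly instead of a prebuilt TimestampRange; the epoch value shown assumes a UTC datetime and the usual Unix-epoch conversion:

    >>> from datetime import datetime, timezone
    >>> f = TimestampRangeFilter(start=datetime(2023, 1, 1, tzinfo=timezone.utc))
    >>> f.to_dict()
    {'timestamp_range_filter': {'start_timestamp_micros': 1672531200000000}}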
) - self.start_column = start_column - self.inclusive_start = inclusive_start - if inclusive_end is None: inclusive_end = True - elif end_column is None: + elif end_qualifier is None: raise ValueError( - "Inclusive end was specified but no " "end column was given." + "inclusive_end was specified but no end_qualifier was given." ) - self.end_column = end_column + + self.family_id = family_id + + self.start_qualifier = start_qualifier + self.inclusive_start = inclusive_start + + self.end_qualifier = end_qualifier self.inclusive_end = inclusive_end def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return ( - other.column_family_id == self.column_family_id - and other.start_column == self.start_column - and other.end_column == self.end_column + other.family_id == self.family_id + and other.start_qualifier == self.start_qualifier + and other.end_qualifier == self.end_qualifier and other.inclusive_start == self.inclusive_start and other.inclusive_end == self.inclusive_end ) @@ -402,31 +418,41 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def to_pb(self): + def _to_pb(self) -> data_v2_pb2.RowFilter: """Converts the row filter to a protobuf. First converts to a :class:`.data_v2_pb2.ColumnRange` and then uses it in the ``column_range_filter`` field. - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. + Returns: The converted current object. """ - column_range_kwargs = {"family_name": self.column_family_id} - if self.start_column is not None: + column_range = data_v2_pb2.ColumnRange(**self.range_to_dict()) + return data_v2_pb2.RowFilter(column_range_filter=column_range) + + def range_to_dict(self) -> dict[str, str | bytes]: + """Converts the column range range to a dict representation.""" + column_range_kwargs: dict[str, str | bytes] = {} + column_range_kwargs["family_name"] = self.family_id + if self.start_qualifier is not None: if self.inclusive_start: key = "start_qualifier_closed" else: key = "start_qualifier_open" - column_range_kwargs[key] = _to_bytes(self.start_column) - if self.end_column is not None: + column_range_kwargs[key] = _to_bytes(self.start_qualifier) + if self.end_qualifier is not None: if self.inclusive_end: key = "end_qualifier_closed" else: key = "end_qualifier_open" - column_range_kwargs[key] = _to_bytes(self.end_column) + column_range_kwargs[key] = _to_bytes(self.end_qualifier) + return column_range_kwargs - column_range = data_v2_pb2.ColumnRange(**column_range_kwargs) - return data_v2_pb2.RowFilter(column_range_filter=column_range) + def to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"column_range_filter": self.range_to_dict()} + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(family_id='{self.family_id}', start_qualifier={self.start_qualifier!r}, end_qualifier={self.end_qualifier!r}, inclusive_start={self.inclusive_start}, inclusive_end={self.inclusive_end})" class ValueRegexFilter(_RegexFilter): @@ -450,29 +476,64 @@ class ValueRegexFilter(_RegexFilter): match this regex. String values will be encoded as ASCII. """ - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. 
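A hedged sketch of the renamed ColumnRangeFilter parameters and their dict form, with illustrative family and qualifier values; inclusive bounds map to *_closed keys and exclusive bounds to *_open keys:

    >>> f = ColumnRangeFilter("fam", start_qualifier=b"a", end_qualifier=b"z", inclusive_end=False)
    >>> f.range_to_dict()
    {'family_name': 'fam', 'start_qualifier_closed': b'a', 'end_qualifier_open': b'z'}
    >>> f.to_dict()
    {'column_range_filter': {'family_name': 'fam', 'start_qualifier_closed': b'a', 'end_qualifier_open': b'z'}}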
- """ - return data_v2_pb2.RowFilter(value_regex_filter=self.regex) + def to_dict(self) -> dict[str, bytes]: + """Converts the row filter to a dict representation.""" + return {"value_regex_filter": self.regex} -class ExactValueFilter(ValueRegexFilter): +class LiteralValueFilter(ValueRegexFilter): """Row filter for an exact value. :type value: bytes or str or int :param value: - a literal string encodable as ASCII, or the - equivalent bytes, or an integer (which will be packed into 8-bytes). + a literal string, integer, or the equivalent bytes. + Integer values will be packed into signed 8-bytes. """ - def __init__(self, value): + def __init__(self, value: bytes | str | int): if isinstance(value, int): value = _PACK_I64(value) - super(ExactValueFilter, self).__init__(value) + elif isinstance(value, str): + value = value.encode("utf-8") + value = self._write_literal_regex(value) + super(LiteralValueFilter, self).__init__(value) + + @staticmethod + def _write_literal_regex(input_bytes: bytes) -> bytes: + """ + Escape re2 special characters from literal bytes. + + Extracted from: re2 QuoteMeta: + https://github.com/google/re2/blob/70f66454c255080a54a8da806c52d1f618707f8a/re2/re2.cc#L456 + """ + result = bytearray() + for byte in input_bytes: + # If this is the part of a UTF8 or Latin1 character, we need \ + # to copy this byte without escaping. Experimentally this is \ + # what works correctly with the regexp library. \ + utf8_latin1_check = (byte & 128) == 0 + if ( + (byte < ord("a") or byte > ord("z")) + and (byte < ord("A") or byte > ord("Z")) + and (byte < ord("0") or byte > ord("9")) + and byte != ord("_") + and utf8_latin1_check + ): + if byte == 0: + # Special handling for null chars. + # Note that this special handling is not strictly required for RE2, + # but this quoting is required for other regexp libraries such as + # PCRE. + # Can't use "\\0" since the next character might be a digit. + result.extend([ord("\\"), ord("x"), ord("0"), ord("0")]) + continue + result.append(ord(b"\\")) + result.append(byte) + return bytes(result) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(value={self.regex!r})" class ValueRangeFilter(RowFilter): @@ -510,25 +571,29 @@ class ValueRangeFilter(RowFilter): """ def __init__( - self, start_value=None, end_value=None, inclusive_start=None, inclusive_end=None + self, + start_value: bytes | int | None = None, + end_value: bytes | int | None = None, + inclusive_start: bool | None = None, + inclusive_end: bool | None = None, ): if inclusive_start is None: inclusive_start = True elif start_value is None: raise ValueError( - "Inclusive start was specified but no " "start value was given." + "inclusive_start was specified but no start_value was given." ) - if isinstance(start_value, int): - start_value = _PACK_I64(start_value) - self.start_value = start_value - self.inclusive_start = inclusive_start - if inclusive_end is None: inclusive_end = True elif end_value is None: raise ValueError( - "Inclusive end was specified but no " "end value was given." + "inclusive_end was specified but no end_qualifier was given." 
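A small sketch of the literal-value escaping described above, with illustrative values; RE2 metacharacters are backslash-escaped so the value matches literally, while alphanumerics and underscores pass through untouched:

    >>> LiteralValueFilter("a.b").to_dict()
    {'value_regex_filter': b'a\\.b'}
    >>> LiteralValueFilter(b"abc_123").to_dict()
    {'value_regex_filter': b'abc_123'}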
) + if isinstance(start_value, int): + start_value = _PACK_I64(start_value) + self.start_value = start_value + self.inclusive_start = inclusive_start + if isinstance(end_value, int): end_value = _PACK_I64(end_value) self.end_value = end_value @@ -547,15 +612,19 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def to_pb(self): + def _to_pb(self) -> data_v2_pb2.RowFilter: """Converts the row filter to a protobuf. First converts to a :class:`.data_v2_pb2.ValueRange` and then uses it to create a row filter protobuf. - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. + Returns: The converted current object. """ + value_range = data_v2_pb2.ValueRange(**self.range_to_dict()) + return data_v2_pb2.RowFilter(value_range_filter=value_range) + + def range_to_dict(self) -> dict[str, bytes]: + """Converts the value range range to a dict representation.""" value_range_kwargs = {} if self.start_value is not None: if self.inclusive_start: @@ -569,12 +638,17 @@ def to_pb(self): else: key = "end_value_open" value_range_kwargs[key] = _to_bytes(self.end_value) + return value_range_kwargs - value_range = data_v2_pb2.ValueRange(**value_range_kwargs) - return data_v2_pb2.RowFilter(value_range_filter=value_range) + def to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"value_range_filter": self.range_to_dict()} + def __repr__(self) -> str: + return f"{self.__class__.__name__}(start_value={self.start_value!r}, end_value={self.end_value!r}, inclusive_start={self.inclusive_start}, inclusive_end={self.inclusive_end})" -class _CellCountFilter(RowFilter): + +class _CellCountFilter(RowFilter, ABC): """Row filter that uses an integer count of cells. The cell count is used as an offset or a limit for the number @@ -584,7 +658,7 @@ class _CellCountFilter(RowFilter): :param num_cells: An integer count / offset / limit. """ - def __init__(self, num_cells): + def __init__(self, num_cells: int): self.num_cells = num_cells def __eq__(self, other): @@ -595,6 +669,9 @@ def __eq__(self, other): def __ne__(self, other): return not self == other + def __repr__(self) -> str: + return f"{self.__class__.__name__}(num_cells={self.num_cells})" + class CellsRowOffsetFilter(_CellCountFilter): """Row filter to skip cells in a row. @@ -603,13 +680,9 @@ class CellsRowOffsetFilter(_CellCountFilter): :param num_cells: Skips the first N cells of the row. """ - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(cells_per_row_offset_filter=self.num_cells) + def to_dict(self) -> dict[str, int]: + """Converts the row filter to a dict representation.""" + return {"cells_per_row_offset_filter": self.num_cells} class CellsRowLimitFilter(_CellCountFilter): @@ -619,13 +692,9 @@ class CellsRowLimitFilter(_CellCountFilter): :param num_cells: Matches only the first N cells of the row. """ - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(cells_per_row_limit_filter=self.num_cells) + def to_dict(self) -> dict[str, int]: + """Converts the row filter to a dict representation.""" + return {"cells_per_row_limit_filter": self.num_cells} class CellsColumnLimitFilter(_CellCountFilter): @@ -637,13 +706,9 @@ class CellsColumnLimitFilter(_CellCountFilter): timestamps of each cell. 
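A hedged sketch of ValueRangeFilter with integer bounds, using illustrative values; integers are packed into 8-byte big-endian form before being placed in the range dict, and bounds are inclusive by default:

    >>> f = ValueRangeFilter(start_value=1, end_value=10)
    >>> f.range_to_dict()
    {'start_value_closed': b'\x00\x00\x00\x00\x00\x00\x00\x01', 'end_value_closed': b'\x00\x00\x00\x00\x00\x00\x00\n'}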
""" - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(cells_per_column_limit_filter=self.num_cells) + def to_dict(self) -> dict[str, int]: + """Converts the row filter to a dict representation.""" + return {"cells_per_column_limit_filter": self.num_cells} class StripValueTransformerFilter(_BoolFilter): @@ -655,13 +720,9 @@ class StripValueTransformerFilter(_BoolFilter): transformer than a generic query / filter. """ - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(strip_value_transformer=self.flag) + def to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"strip_value_transformer": self.flag} class ApplyLabelFilter(RowFilter): @@ -683,7 +744,7 @@ class ApplyLabelFilter(RowFilter): ``[a-z0-9\\-]+``. """ - def __init__(self, label): + def __init__(self, label: str): self.label = label def __eq__(self, other): @@ -694,16 +755,15 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def to_pb(self): - """Converts the row filter to a protobuf. + def to_dict(self) -> dict[str, str]: + """Converts the row filter to a dict representation.""" + return {"apply_label_transformer": self.label} - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(apply_label_transformer=self.label) + def __repr__(self) -> str: + return f"{self.__class__.__name__}(label={self.label})" -class _FilterCombination(RowFilter): +class _FilterCombination(RowFilter, Sequence[RowFilter], ABC): """Chain of row filters. Sends rows through several filters in sequence. The filters are "chained" @@ -714,10 +774,10 @@ class _FilterCombination(RowFilter): :param filters: List of :class:`RowFilter` """ - def __init__(self, filters=None): + def __init__(self, filters: list[RowFilter] | None = None): if filters is None: filters = [] - self.filters = filters + self.filters: list[RowFilter] = filters def __eq__(self, other): if not isinstance(other, self.__class__): @@ -727,6 +787,38 @@ def __eq__(self, other): def __ne__(self, other): return not self == other + def __len__(self) -> int: + return len(self.filters) + + @overload + def __getitem__(self, index: int) -> RowFilter: + # overload signature for type checking + pass + + @overload + def __getitem__(self, index: slice) -> list[RowFilter]: + # overload signature for type checking + pass + + def __getitem__(self, index): + return self.filters[index] + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(filters={self.filters})" + + def __str__(self) -> str: + """ + Returns a string representation of the filter chain. + + Adds line breaks between each sub-filter for readability. + """ + output = [f"{self.__class__.__name__}(["] + for filter_ in self.filters: + filter_lines = f"{filter_},".splitlines() + output.extend([f" {line}" for line in filter_lines]) + output.append("])") + return "\n".join(output) + class RowFilterChain(_FilterCombination): """Chain of row filters. @@ -739,17 +831,20 @@ class RowFilterChain(_FilterCombination): :param filters: List of :class:`RowFilter` """ - def to_pb(self): + def _to_pb(self) -> data_v2_pb2.RowFilter: """Converts the row filter to a protobuf. - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. 
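A short sketch of the new Sequence behavior and the multi-line string form of filter combinations, with illustrative sub-filters:

    >>> chain = RowFilterChain(filters=[CellsColumnLimitFilter(1), StripValueTransformerFilter(True)])
    >>> len(chain)
    2
    >>> print(chain)
    RowFilterChain([
        CellsColumnLimitFilter(num_cells=1),
        StripValueTransformerFilter(flag=True),
    ])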
+ Returns: The converted current object. """ chain = data_v2_pb2.RowFilter.Chain( - filters=[row_filter.to_pb() for row_filter in self.filters] + filters=[row_filter._to_pb() for row_filter in self.filters] ) return data_v2_pb2.RowFilter(chain=chain) + def to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"chain": {"filters": [f.to_dict() for f in self.filters]}} + class RowFilterUnion(_FilterCombination): """Union of row filters. @@ -764,50 +859,58 @@ class RowFilterUnion(_FilterCombination): :param filters: List of :class:`RowFilter` """ - def to_pb(self): + def _to_pb(self) -> data_v2_pb2.RowFilter: """Converts the row filter to a protobuf. - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. + Returns: The converted current object. """ interleave = data_v2_pb2.RowFilter.Interleave( - filters=[row_filter.to_pb() for row_filter in self.filters] + filters=[row_filter._to_pb() for row_filter in self.filters] ) return data_v2_pb2.RowFilter(interleave=interleave) + def to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"interleave": {"filters": [f.to_dict() for f in self.filters]}} + class ConditionalRowFilter(RowFilter): """Conditional row filter which exhibits ternary behavior. - Executes one of two filters based on another filter. If the ``base_filter`` + Executes one of two filters based on another filter. If the ``predicate_filter`` returns any cells in the row, then ``true_filter`` is executed. If not, then ``false_filter`` is executed. .. note:: - The ``base_filter`` does not execute atomically with the true and false + The ``predicate_filter`` does not execute atomically with the true and false filters, which may lead to inconsistent or unexpected results. Additionally, executing a :class:`ConditionalRowFilter` has poor performance on the server, especially when ``false_filter`` is set. - :type base_filter: :class:`RowFilter` - :param base_filter: The filter to condition on before executing the + :type predicate_filter: :class:`RowFilter` + :param predicate_filter: The filter to condition on before executing the true/false filters. :type true_filter: :class:`RowFilter` :param true_filter: (Optional) The filter to execute if there are any cells - matching ``base_filter``. If not provided, no results + matching ``predicate_filter``. If not provided, no results will be returned in the true case. :type false_filter: :class:`RowFilter` :param false_filter: (Optional) The filter to execute if there are no cells - matching ``base_filter``. If not provided, no results + matching ``predicate_filter``. If not provided, no results will be returned in the false case. 
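A hedged sketch of the nested dict form produced by the combination filters, with illustrative sub-filters:

    >>> union = RowFilterUnion([PassAllFilter(True), BlockAllFilter(True)])
    >>> union.to_dict()
    {'interleave': {'filters': [{'pass_all_filter': True}, {'block_all_filter': True}]}}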
""" - def __init__(self, base_filter, true_filter=None, false_filter=None): - self.base_filter = base_filter + def __init__( + self, + predicate_filter: RowFilter, + true_filter: RowFilter | None = None, + false_filter: RowFilter | None = None, + ): + self.predicate_filter = predicate_filter self.true_filter = true_filter self.false_filter = false_filter @@ -815,7 +918,7 @@ def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return ( - other.base_filter == self.base_filter + other.predicate_filter == self.predicate_filter and other.true_filter == self.true_filter and other.false_filter == self.false_filter ) @@ -823,16 +926,43 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def to_pb(self): + def _to_pb(self) -> data_v2_pb2.RowFilter: """Converts the row filter to a protobuf. - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. + Returns: The converted current object. """ - condition_kwargs = {"predicate_filter": self.base_filter.to_pb()} + condition_kwargs = {"predicate_filter": self.predicate_filter._to_pb()} if self.true_filter is not None: - condition_kwargs["true_filter"] = self.true_filter.to_pb() + condition_kwargs["true_filter"] = self.true_filter._to_pb() if self.false_filter is not None: - condition_kwargs["false_filter"] = self.false_filter.to_pb() + condition_kwargs["false_filter"] = self.false_filter._to_pb() condition = data_v2_pb2.RowFilter.Condition(**condition_kwargs) return data_v2_pb2.RowFilter(condition=condition) + + def condition_to_dict(self) -> dict[str, Any]: + """Converts the condition to a dict representation.""" + condition_kwargs = {"predicate_filter": self.predicate_filter.to_dict()} + if self.true_filter is not None: + condition_kwargs["true_filter"] = self.true_filter.to_dict() + if self.false_filter is not None: + condition_kwargs["false_filter"] = self.false_filter.to_dict() + return condition_kwargs + + def to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"condition": self.condition_to_dict()} + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(predicate_filter={self.predicate_filter!r}, true_filter={self.true_filter!r}, false_filter={self.false_filter!r})" + + def __str__(self) -> str: + output = [f"{self.__class__.__name__}("] + for filter_type in ("predicate_filter", "true_filter", "false_filter"): + filter_ = getattr(self, filter_type) + if filter_ is None: + continue + # add the new filter set, adding indentations for readability + filter_lines = f"{filter_type}={filter_},".splitlines() + output.extend(f" {line}" for line in filter_lines) + output.append(")") + return "\n".join(output) diff --git a/google/cloud/bigtable/deprecated/batcher.py b/google/cloud/bigtable/deprecated/batcher.py deleted file mode 100644 index 58cf6b6e3..000000000 --- a/google/cloud/bigtable/deprecated/batcher.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
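A hedged sketch of the renamed ConditionalRowFilter arguments and their dict form, with an illustrative predicate and true filter:

    >>> cond = ConditionalRowFilter(
    ...     predicate_filter=RowSampleFilter(0.25),
    ...     true_filter=ApplyLabelFilter("sampled"),
    ... )
    >>> cond.to_dict()
    {'condition': {'predicate_filter': {'row_sample_filter': 0.25}, 'true_filter': {'apply_label_transformer': 'sampled'}}}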
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""User friendly container for Google Cloud Bigtable MutationBatcher.""" - - -FLUSH_COUNT = 1000 -MAX_MUTATIONS = 100000 -MAX_ROW_BYTES = 5242880 # 5MB - - -class MaxMutationsError(ValueError): - """The number of mutations for bulk request is too big.""" - - -class MutationsBatcher(object): - """A MutationsBatcher is used in batch cases where the number of mutations - is large or unknown. It will store DirectRows in memory until one of the - size limits is reached, or an explicit call to flush() is performed. When - a flush event occurs, the DirectRows in memory will be sent to Cloud - Bigtable. Batching mutations is more efficient than sending individual - request. - - This class is not suited for usage in systems where each mutation - must be guaranteed to be sent, since calling mutate may only result in an - in-memory change. In a case of a system crash, any DirectRows remaining in - memory will not necessarily be sent to the service, even after the - completion of the mutate() method. - - TODO: Performance would dramatically improve if this class had the - capability of asynchronous, parallel RPCs. - - :type table: class - :param table: class:`~google.cloud.bigtable.deprecated.table.Table`. - - :type flush_count: int - :param flush_count: (Optional) Max number of rows to flush. If it - reaches the max number of rows it calls finish_batch() to mutate the - current row batch. Default is FLUSH_COUNT (1000 rows). - - :type max_row_bytes: int - :param max_row_bytes: (Optional) Max number of row mutations size to - flush. If it reaches the max number of row mutations size it calls - finish_batch() to mutate the current row batch. Default is MAX_ROW_BYTES - (5 MB). - """ - - def __init__(self, table, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): - self.rows = [] - self.total_mutation_count = 0 - self.total_size = 0 - self.table = table - self.flush_count = flush_count - self.max_row_bytes = max_row_bytes - - def mutate(self, row): - """Add a row to the batch. If the current batch meets one of the size - limits, the batch is sent synchronously. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_api_batcher_mutate] - :end-before: [END bigtable_api_batcher_mutate] - :dedent: 4 - - :type row: class - :param row: class:`~google.cloud.bigtable.deprecated.row.DirectRow`. - - :raises: One of the following: - * :exc:`~.table._BigtableRetryableError` if any - row returned a transient error. - * :exc:`RuntimeError` if the number of responses doesn't - match the number of rows that were retried - * :exc:`.batcher.MaxMutationsError` if any row exceeds max - mutations count. - """ - mutation_count = len(row._get_mutations()) - if mutation_count > MAX_MUTATIONS: - raise MaxMutationsError( - "The row key {} exceeds the number of mutations {}.".format( - row.row_key, mutation_count - ) - ) - - if (self.total_mutation_count + mutation_count) >= MAX_MUTATIONS: - self.flush() - - self.rows.append(row) - self.total_mutation_count += mutation_count - self.total_size += row.get_mutations_size() - - if self.total_size >= self.max_row_bytes or len(self.rows) >= self.flush_count: - self.flush() - - def mutate_rows(self, rows): - """Add multiple rows to the batch. If the current batch meets one of the size - limits, the batch is sent synchronously. - - For example: - - .. 
literalinclude:: snippets.py - :start-after: [START bigtable_api_batcher_mutate_rows] - :end-before: [END bigtable_api_batcher_mutate_rows] - :dedent: 4 - - :type rows: list:[`~google.cloud.bigtable.deprecated.row.DirectRow`] - :param rows: list:[`~google.cloud.bigtable.deprecated.row.DirectRow`]. - - :raises: One of the following: - * :exc:`~.table._BigtableRetryableError` if any - row returned a transient error. - * :exc:`RuntimeError` if the number of responses doesn't - match the number of rows that were retried - * :exc:`.batcher.MaxMutationsError` if any row exceeds max - mutations count. - """ - for row in rows: - self.mutate(row) - - def flush(self): - """Sends the current. batch to Cloud Bigtable. - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_api_batcher_flush] - :end-before: [END bigtable_api_batcher_flush] - :dedent: 4 - - """ - if len(self.rows) != 0: - self.table.mutate_rows(self.rows) - self.total_mutation_count = 0 - self.total_size = 0 - self.rows = [] diff --git a/google/cloud/bigtable/deprecated/client.py b/google/cloud/bigtable/deprecated/client.py deleted file mode 100644 index c13e5f0da..000000000 --- a/google/cloud/bigtable/deprecated/client.py +++ /dev/null @@ -1,521 +0,0 @@ -# Copyright 2015 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Parent client for calling the Google Cloud Bigtable API. - -This is the base from which all interactions with the API occur. 
- -In the hierarchy of API concepts - -* a :class:`~google.cloud.bigtable.deprecated.client.Client` owns an - :class:`~google.cloud.bigtable.deprecated.instance.Instance` -* an :class:`~google.cloud.bigtable.deprecated.instance.Instance` owns a - :class:`~google.cloud.bigtable.deprecated.table.Table` -* a :class:`~google.cloud.bigtable.deprecated.table.Table` owns a - :class:`~.column_family.ColumnFamily` -* a :class:`~google.cloud.bigtable.deprecated.table.Table` owns a - :class:`~google.cloud.bigtable.deprecated.row.Row` (and all the cells in the row) -""" -import os -import warnings -import grpc # type: ignore - -from google.api_core.gapic_v1 import client_info as client_info_lib -import google.auth # type: ignore -from google.auth.credentials import AnonymousCredentials # type: ignore - -from google.cloud import bigtable_v2 -from google.cloud import bigtable_admin_v2 -from google.cloud.bigtable_v2.services.bigtable.transports import BigtableGrpcTransport -from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports import ( - BigtableInstanceAdminGrpcTransport, -) -from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports import ( - BigtableTableAdminGrpcTransport, -) - -from google.cloud import bigtable -from google.cloud.bigtable.deprecated.instance import Instance -from google.cloud.bigtable.deprecated.cluster import Cluster - -from google.cloud.client import ClientWithProject # type: ignore - -from google.cloud.bigtable_admin_v2.types import instance -from google.cloud.bigtable.deprecated.cluster import _CLUSTER_NAME_RE -from google.cloud.environment_vars import BIGTABLE_EMULATOR # type: ignore - - -INSTANCE_TYPE_PRODUCTION = instance.Instance.Type.PRODUCTION -INSTANCE_TYPE_DEVELOPMENT = instance.Instance.Type.DEVELOPMENT -INSTANCE_TYPE_UNSPECIFIED = instance.Instance.Type.TYPE_UNSPECIFIED -SPANNER_ADMIN_SCOPE = "https://www.googleapis.com/auth/spanner.admin" -ADMIN_SCOPE = "https://www.googleapis.com/auth/bigtable.admin" -"""Scope for interacting with the Cluster Admin and Table Admin APIs.""" -DATA_SCOPE = "https://www.googleapis.com/auth/bigtable.data" -"""Scope for reading and writing table data.""" -READ_ONLY_SCOPE = "https://www.googleapis.com/auth/bigtable.data.readonly" -"""Scope for reading table data.""" - -_DEFAULT_BIGTABLE_EMULATOR_CLIENT = "google-cloud-bigtable-emulator" -_GRPC_CHANNEL_OPTIONS = ( - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ("grpc.keepalive_time_ms", 30000), - ("grpc.keepalive_timeout_ms", 10000), -) - - -def _create_gapic_client(client_class, client_options=None, transport=None): - def inner(self): - return client_class( - credentials=None, - client_info=self._client_info, - client_options=client_options, - transport=transport, - ) - - return inner - - -class Client(ClientWithProject): - """Client for interacting with Google Cloud Bigtable API. - - DEPRECATED: This class is deprecated and may be removed in a future version - Please use `google.cloud.bigtable.BigtableDataClient` instead. - - .. note:: - - Since the Cloud Bigtable API requires the gRPC transport, no - ``_http`` argument is accepted by this class. - - :type project: :class:`str` or :func:`unicode ` - :param project: (Optional) The ID of the project which owns the - instances, tables and data. If not provided, will - attempt to determine from the environment. - - :type credentials: :class:`~google.auth.credentials.Credentials` - :param credentials: (Optional) The OAuth2 Credentials to use for this - client. 
If not passed, falls back to the default - inferred from the environment. - - :type read_only: bool - :param read_only: (Optional) Boolean indicating if the data scope should be - for reading only (or for writing as well). Defaults to - :data:`False`. - - :type admin: bool - :param admin: (Optional) Boolean indicating if the client will be used to - interact with the Instance Admin or Table Admin APIs. This - requires the :const:`ADMIN_SCOPE`. Defaults to :data:`False`. - - :type: client_info: :class:`google.api_core.gapic_v1.client_info.ClientInfo` - :param client_info: - The client info used to send a user-agent string along with API - requests. If ``None``, then default info will be used. Generally, - you only need to set this if you're developing your own library - or partner tool. - - :type client_options: :class:`~google.api_core.client_options.ClientOptions` - or :class:`dict` - :param client_options: (Optional) Client options used to set user options - on the client. API Endpoint should be set through client_options. - - :type admin_client_options: - :class:`~google.api_core.client_options.ClientOptions` or :class:`dict` - :param admin_client_options: (Optional) Client options used to set user - options on the client. API Endpoint for admin operations should be set - through admin_client_options. - - :type channel: :instance: grpc.Channel - :param channel (grpc.Channel): (Optional) DEPRECATED: - A ``Channel`` instance through which to make calls. - This argument is mutually exclusive with ``credentials``; - providing both will raise an exception. No longer used. - - :raises: :class:`ValueError ` if both ``read_only`` - and ``admin`` are :data:`True` - """ - - _table_data_client = None - _table_admin_client = None - _instance_admin_client = None - - def __init__( - self, - project=None, - credentials=None, - read_only=False, - admin=False, - client_info=None, - client_options=None, - admin_client_options=None, - channel=None, - ): - warnings.warn( - "'Client' is deprecated. Please use 'google.cloud.bigtable.BigtableDataClient' instead.", - DeprecationWarning, - stacklevel=2, - ) - if client_info is None: - client_info = client_info_lib.ClientInfo( - client_library_version=bigtable.__version__, - ) - if read_only and admin: - raise ValueError( - "A read-only client cannot also perform" "administrative actions." - ) - - # NOTE: We set the scopes **before** calling the parent constructor. - # It **may** use those scopes in ``with_scopes_if_required``. - self._read_only = bool(read_only) - self._admin = bool(admin) - self._client_info = client_info - self._emulator_host = os.getenv(BIGTABLE_EMULATOR) - - if self._emulator_host is not None: - if credentials is None: - credentials = AnonymousCredentials() - if project is None: - project = _DEFAULT_BIGTABLE_EMULATOR_CLIENT - - if channel is not None: - warnings.warn( - "'channel' is deprecated and no longer used.", - DeprecationWarning, - stacklevel=2, - ) - - self._client_options = client_options - self._admin_client_options = admin_client_options - self._channel = channel - self.SCOPE = self._get_scopes() - super(Client, self).__init__( - project=project, - credentials=credentials, - client_options=client_options, - ) - - def _get_scopes(self): - """Get the scopes corresponding to admin / read-only state. - - Returns: - Tuple[str, ...]: The tuple of scopes. 
- """ - if self._read_only: - scopes = (READ_ONLY_SCOPE,) - else: - scopes = (DATA_SCOPE,) - - if self._admin: - scopes += (ADMIN_SCOPE,) - - return scopes - - def _emulator_channel(self, transport, options): - """Create a channel using self._credentials - - Works in a similar way to ``grpc.secure_channel`` but using - ``grpc.local_channel_credentials`` rather than - ``grpc.ssh_channel_credentials`` to allow easy connection to a - local emulator. - - Returns: - grpc.Channel or grpc.aio.Channel - """ - # TODO: Implement a special credentials type for emulator and use - # "transport.create_channel" to create gRPC channels once google-auth - # extends it's allowed credentials types. - # Note: this code also exists in the firestore client. - if "GrpcAsyncIOTransport" in str(transport.__name__): - return grpc.aio.secure_channel( - self._emulator_host, - self._local_composite_credentials(), - options=options, - ) - else: - return grpc.secure_channel( - self._emulator_host, - self._local_composite_credentials(), - options=options, - ) - - def _local_composite_credentials(self): - """Create credentials for the local emulator channel. - - :return: grpc.ChannelCredentials - """ - credentials = google.auth.credentials.with_scopes_if_required( - self._credentials, None - ) - request = google.auth.transport.requests.Request() - - # Create the metadata plugin for inserting the authorization header. - metadata_plugin = google.auth.transport.grpc.AuthMetadataPlugin( - credentials, request - ) - - # Create a set of grpc.CallCredentials using the metadata plugin. - google_auth_credentials = grpc.metadata_call_credentials(metadata_plugin) - - # Using the local_credentials to allow connection to emulator - local_credentials = grpc.local_channel_credentials() - - # Combine the local credentials and the authorization credentials. - return grpc.composite_channel_credentials( - local_credentials, google_auth_credentials - ) - - def _create_gapic_client_channel(self, client_class, grpc_transport): - if self._emulator_host is not None: - api_endpoint = self._emulator_host - elif self._client_options and self._client_options.api_endpoint: - api_endpoint = self._client_options.api_endpoint - else: - api_endpoint = client_class.DEFAULT_ENDPOINT - - if self._emulator_host is not None: - channel = self._emulator_channel( - transport=grpc_transport, - options=_GRPC_CHANNEL_OPTIONS, - ) - else: - channel = grpc_transport.create_channel( - host=api_endpoint, - credentials=self._credentials, - options=_GRPC_CHANNEL_OPTIONS, - ) - return grpc_transport(channel=channel, host=api_endpoint) - - @property - def project_path(self): - """Project name to be used with Instance Admin API. - - .. note:: - - This property will not change if ``project`` does not, but the - return value is not cached. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_api_project_path] - :end-before: [END bigtable_api_project_path] - :dedent: 4 - - The project name is of the form - - ``"projects/{project}"`` - - :rtype: str - :returns: Return a fully-qualified project string. - """ - return self.instance_admin_client.common_project_path(self.project) - - @property - def table_data_client(self): - """Getter for the gRPC stub used for the Table Admin API. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_api_table_data_client] - :end-before: [END bigtable_api_table_data_client] - :dedent: 4 - - :rtype: :class:`.bigtable_v2.BigtableClient` - :returns: A BigtableClient object. 
- """ - if self._table_data_client is None: - transport = self._create_gapic_client_channel( - bigtable_v2.BigtableClient, - BigtableGrpcTransport, - ) - klass = _create_gapic_client( - bigtable_v2.BigtableClient, - client_options=self._client_options, - transport=transport, - ) - self._table_data_client = klass(self) - return self._table_data_client - - @property - def table_admin_client(self): - """Getter for the gRPC stub used for the Table Admin API. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_api_table_admin_client] - :end-before: [END bigtable_api_table_admin_client] - :dedent: 4 - - :rtype: :class:`.bigtable_admin_pb2.BigtableTableAdmin` - :returns: A BigtableTableAdmin instance. - :raises: :class:`ValueError ` if the current - client is not an admin client or if it has not been - :meth:`start`-ed. - """ - if self._table_admin_client is None: - if not self._admin: - raise ValueError("Client is not an admin client.") - - transport = self._create_gapic_client_channel( - bigtable_admin_v2.BigtableTableAdminClient, - BigtableTableAdminGrpcTransport, - ) - klass = _create_gapic_client( - bigtable_admin_v2.BigtableTableAdminClient, - client_options=self._admin_client_options, - transport=transport, - ) - self._table_admin_client = klass(self) - return self._table_admin_client - - @property - def instance_admin_client(self): - """Getter for the gRPC stub used for the Table Admin API. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_api_instance_admin_client] - :end-before: [END bigtable_api_instance_admin_client] - :dedent: 4 - - :rtype: :class:`.bigtable_admin_pb2.BigtableInstanceAdmin` - :returns: A BigtableInstanceAdmin instance. - :raises: :class:`ValueError ` if the current - client is not an admin client or if it has not been - :meth:`start`-ed. - """ - if self._instance_admin_client is None: - if not self._admin: - raise ValueError("Client is not an admin client.") - - transport = self._create_gapic_client_channel( - bigtable_admin_v2.BigtableInstanceAdminClient, - BigtableInstanceAdminGrpcTransport, - ) - klass = _create_gapic_client( - bigtable_admin_v2.BigtableInstanceAdminClient, - client_options=self._admin_client_options, - transport=transport, - ) - self._instance_admin_client = klass(self) - return self._instance_admin_client - - def instance(self, instance_id, display_name=None, instance_type=None, labels=None): - """Factory to create a instance associated with this client. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_api_create_prod_instance] - :end-before: [END bigtable_api_create_prod_instance] - :dedent: 4 - - :type instance_id: str - :param instance_id: The ID of the instance. - - :type display_name: str - :param display_name: (Optional) The display name for the instance in - the Cloud Console UI. (Must be between 4 and 30 - characters.) If this value is not set in the - constructor, will fall back to the instance ID. - - :type instance_type: int - :param instance_type: (Optional) The type of the instance. - Possible values are represented - by the following constants: - :data:`google.cloud.bigtable.deprecated.instance.InstanceType.PRODUCTION`. - :data:`google.cloud.bigtable.deprecated.instance.InstanceType.DEVELOPMENT`, - Defaults to - :data:`google.cloud.bigtable.deprecated.instance.InstanceType.UNSPECIFIED`. 
- - :type labels: dict - :param labels: (Optional) Labels are a flexible and lightweight - mechanism for organizing cloud resources into groups - that reflect a customer's organizational needs and - deployment strategies. They can be used to filter - resources and aggregate metrics. Label keys must be - between 1 and 63 characters long. Maximum 64 labels can - be associated with a given resource. Label values must - be between 0 and 63 characters long. Keys and values - must both be under 128 bytes. - - :rtype: :class:`~google.cloud.bigtable.deprecated.instance.Instance` - :returns: an instance owned by this client. - """ - return Instance( - instance_id, - self, - display_name=display_name, - instance_type=instance_type, - labels=labels, - ) - - def list_instances(self): - """List instances owned by the project. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_api_list_instances] - :end-before: [END bigtable_api_list_instances] - :dedent: 4 - - :rtype: tuple - :returns: - (instances, failed_locations), where 'instances' is list of - :class:`google.cloud.bigtable.deprecated.instance.Instance`, and - 'failed_locations' is a list of locations which could not - be resolved. - """ - resp = self.instance_admin_client.list_instances( - request={"parent": self.project_path} - ) - instances = [Instance.from_pb(instance, self) for instance in resp.instances] - return instances, resp.failed_locations - - def list_clusters(self): - """List the clusters in the project. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_api_list_clusters_in_project] - :end-before: [END bigtable_api_list_clusters_in_project] - :dedent: 4 - - :rtype: tuple - :returns: - (clusters, failed_locations), where 'clusters' is list of - :class:`google.cloud.bigtable.deprecated.instance.Cluster`, and - 'failed_locations' is a list of strings representing - locations which could not be resolved. - """ - resp = self.instance_admin_client.list_clusters( - request={ - "parent": self.instance_admin_client.instance_path(self.project, "-") - } - ) - clusters = [] - instances = {} - for cluster in resp.clusters: - match_cluster_name = _CLUSTER_NAME_RE.match(cluster.name) - instance_id = match_cluster_name.group("instance") - if instance_id not in instances: - instances[instance_id] = self.instance(instance_id) - clusters.append(Cluster.from_pb(cluster, instances[instance_id])) - return clusters, resp.failed_locations diff --git a/google/cloud/bigtable/deprecated/py.typed b/google/cloud/bigtable/deprecated/py.typed deleted file mode 100644 index 7bd4705d4..000000000 --- a/google/cloud/bigtable/deprecated/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-bigtable package uses inline types. diff --git a/google/cloud/bigtable/deprecated/row.py b/google/cloud/bigtable/deprecated/row.py deleted file mode 100644 index 3b114a74a..000000000 --- a/google/cloud/bigtable/deprecated/row.py +++ /dev/null @@ -1,1267 +0,0 @@ -# Copyright 2015 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""User-friendly container for Google Cloud Bigtable Row.""" - - -import struct - -from google.cloud._helpers import _datetime_from_microseconds # type: ignore -from google.cloud._helpers import _microseconds_from_datetime # type: ignore -from google.cloud._helpers import _to_bytes # type: ignore -from google.cloud.bigtable_v2.types import data as data_v2_pb2 - - -_PACK_I64 = struct.Struct(">q").pack - -MAX_MUTATIONS = 100000 -"""The maximum number of mutations that a row can accumulate.""" - -_MISSING_COLUMN_FAMILY = "Column family {} is not among the cells stored in this row." -_MISSING_COLUMN = ( - "Column {} is not among the cells stored in this row in the column family {}." -) -_MISSING_INDEX = ( - "Index {!r} is not valid for the cells stored in this row for column {} " - "in the column family {}. There are {} such cells." -) - - -class Row(object): - """Base representation of a Google Cloud Bigtable Row. - - This class has three subclasses corresponding to the three - RPC methods for sending row mutations: - - * :class:`DirectRow` for ``MutateRow`` - * :class:`ConditionalRow` for ``CheckAndMutateRow`` - * :class:`AppendRow` for ``ReadModifyWriteRow`` - - :type row_key: bytes - :param row_key: The key for the current row. - - :type table: :class:`Table ` - :param table: (Optional) The table that owns the row. - """ - - def __init__(self, row_key, table=None): - self._row_key = _to_bytes(row_key) - self._table = table - - @property - def row_key(self): - """Row key. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_row_key] - :end-before: [END bigtable_api_row_row_key] - :dedent: 4 - - :rtype: bytes - :returns: The key for the current row. - """ - return self._row_key - - @property - def table(self): - """Row table. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_table] - :end-before: [END bigtable_api_row_table] - :dedent: 4 - - :rtype: table: :class:`Table ` - :returns: table: The table that owns the row. - """ - return self._table - - -class _SetDeleteRow(Row): - """Row helper for setting or deleting cell values. - - Implements helper methods to add mutations to set or delete cell contents: - - * :meth:`set_cell` - * :meth:`delete` - * :meth:`delete_cell` - * :meth:`delete_cells` - - :type row_key: bytes - :param row_key: The key for the current row. - - :type table: :class:`Table ` - :param table: The table that owns the row. - """ - - ALL_COLUMNS = object() - """Sentinel value used to indicate all columns in a column family.""" - - def _get_mutations(self, state=None): - """Gets the list of mutations for a given state. - - This method intended to be implemented by subclasses. - - ``state`` may not need to be used by all subclasses. - - :type state: bool - :param state: The state that the mutation should be - applied in. - - :raises: :class:`NotImplementedError ` - always. - """ - raise NotImplementedError - - def _set_cell(self, column_family_id, column, value, timestamp=None, state=None): - """Helper for :meth:`set_cell` - - Adds a mutation to set the value in a specific cell. - - ``state`` is unused by :class:`DirectRow` but is used by - subclasses. - - :type column_family_id: str - :param column_family_id: The column family that contains the column. - Must be of the form - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. 
- - :type column: bytes - :param column: The column within the column family where the cell - is located. - - :type value: bytes or :class:`int` - :param value: The value to set in the cell. If an integer is used, - will be interpreted as a 64-bit big-endian signed - integer (8 bytes). - - :type timestamp: :class:`datetime.datetime` - :param timestamp: (Optional) The timestamp of the operation. - - :type state: bool - :param state: (Optional) The state that is passed along to - :meth:`_get_mutations`. - """ - column = _to_bytes(column) - if isinstance(value, int): - value = _PACK_I64(value) - value = _to_bytes(value) - if timestamp is None: - # Use -1 for current Bigtable server time. - timestamp_micros = -1 - else: - timestamp_micros = _microseconds_from_datetime(timestamp) - # Truncate to millisecond granularity. - timestamp_micros -= timestamp_micros % 1000 - - mutation_val = data_v2_pb2.Mutation.SetCell( - family_name=column_family_id, - column_qualifier=column, - timestamp_micros=timestamp_micros, - value=value, - ) - mutation_pb = data_v2_pb2.Mutation(set_cell=mutation_val) - self._get_mutations(state).append(mutation_pb) - - def _delete(self, state=None): - """Helper for :meth:`delete` - - Adds a delete mutation (for the entire row) to the accumulated - mutations. - - ``state`` is unused by :class:`DirectRow` but is used by - subclasses. - - :type state: bool - :param state: (Optional) The state that is passed along to - :meth:`_get_mutations`. - """ - mutation_val = data_v2_pb2.Mutation.DeleteFromRow() - mutation_pb = data_v2_pb2.Mutation(delete_from_row=mutation_val) - self._get_mutations(state).append(mutation_pb) - - def _delete_cells(self, column_family_id, columns, time_range=None, state=None): - """Helper for :meth:`delete_cell` and :meth:`delete_cells`. - - ``state`` is unused by :class:`DirectRow` but is used by - subclasses. - - :type column_family_id: str - :param column_family_id: The column family that contains the column - or columns with cells being deleted. Must be - of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type columns: :class:`list` of :class:`str` / - :func:`unicode `, or :class:`object` - :param columns: The columns within the column family that will have - cells deleted. If :attr:`ALL_COLUMNS` is used then - the entire column family will be deleted from the row. - - :type time_range: :class:`TimestampRange` - :param time_range: (Optional) The range of time within which cells - should be deleted. - - :type state: bool - :param state: (Optional) The state that is passed along to - :meth:`_get_mutations`. - """ - mutations_list = self._get_mutations(state) - if columns is self.ALL_COLUMNS: - mutation_val = data_v2_pb2.Mutation.DeleteFromFamily( - family_name=column_family_id - ) - mutation_pb = data_v2_pb2.Mutation(delete_from_family=mutation_val) - mutations_list.append(mutation_pb) - else: - delete_kwargs = {} - if time_range is not None: - delete_kwargs["time_range"] = time_range.to_pb() - - to_append = [] - for column in columns: - column = _to_bytes(column) - # time_range will never change if present, but the rest of - # delete_kwargs will - delete_kwargs.update( - family_name=column_family_id, column_qualifier=column - ) - mutation_val = data_v2_pb2.Mutation.DeleteFromColumn(**delete_kwargs) - mutation_pb = data_v2_pb2.Mutation(delete_from_column=mutation_val) - to_append.append(mutation_pb) - - # We don't add the mutations until all columns have been - # processed without error. 
- mutations_list.extend(to_append) - - -class DirectRow(_SetDeleteRow): - """Google Cloud Bigtable Row for sending "direct" mutations. - - These mutations directly set or delete cell contents: - - * :meth:`set_cell` - * :meth:`delete` - * :meth:`delete_cell` - * :meth:`delete_cells` - - These methods can be used directly:: - - >>> row = table.row(b'row-key1') - >>> row.set_cell(u'fam', b'col1', b'cell-val') - >>> row.delete_cell(u'fam', b'col2') - - .. note:: - - A :class:`DirectRow` accumulates mutations locally via the - :meth:`set_cell`, :meth:`delete`, :meth:`delete_cell` and - :meth:`delete_cells` methods. To actually send these mutations to the - Google Cloud Bigtable API, you must call :meth:`commit`. - - :type row_key: bytes - :param row_key: The key for the current row. - - :type table: :class:`Table ` - :param table: (Optional) The table that owns the row. This is - used for the :meth: `commit` only. Alternatively, - DirectRows can be persisted via - :meth:`~google.cloud.bigtable.deprecated.table.Table.mutate_rows`. - """ - - def __init__(self, row_key, table=None): - super(DirectRow, self).__init__(row_key, table) - self._pb_mutations = [] - - def _get_mutations(self, state=None): # pylint: disable=unused-argument - """Gets the list of mutations for a given state. - - ``state`` is unused by :class:`DirectRow` but is used by - subclasses. - - :type state: bool - :param state: The state that the mutation should be - applied in. - - :rtype: list - :returns: The list to add new mutations to (for the current state). - """ - return self._pb_mutations - - def get_mutations_size(self): - """Gets the total mutations size for current row - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_get_mutations_size] - :end-before: [END bigtable_api_row_get_mutations_size] - :dedent: 4 - """ - - mutation_size = 0 - for mutation in self._get_mutations(): - mutation_size += mutation._pb.ByteSize() - - return mutation_size - - def set_cell(self, column_family_id, column, value, timestamp=None): - """Sets a value in this row. - - The cell is determined by the ``row_key`` of this :class:`DirectRow` - and the ``column``. The ``column`` must be in an existing - :class:`.ColumnFamily` (as determined by ``column_family_id``). - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_set_cell] - :end-before: [END bigtable_api_row_set_cell] - :dedent: 4 - - :type column_family_id: str - :param column_family_id: The column family that contains the column. - Must be of the form - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type column: bytes - :param column: The column within the column family where the cell - is located. - - :type value: bytes or :class:`int` - :param value: The value to set in the cell. If an integer is used, - will be interpreted as a 64-bit big-endian signed - integer (8 bytes). - - :type timestamp: :class:`datetime.datetime` - :param timestamp: (Optional) The timestamp of the operation. - """ - self._set_cell(column_family_id, column, value, timestamp=timestamp, state=None) - - def delete(self): - """Deletes this row from the table. - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. 
To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_delete] - :end-before: [END bigtable_api_row_delete] - :dedent: 4 - """ - self._delete(state=None) - - def delete_cell(self, column_family_id, column, time_range=None): - """Deletes cell in this row. - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_delete_cell] - :end-before: [END bigtable_api_row_delete_cell] - :dedent: 4 - - :type column_family_id: str - :param column_family_id: The column family that contains the column - or columns with cells being deleted. Must be - of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type column: bytes - :param column: The column within the column family that will have a - cell deleted. - - :type time_range: :class:`TimestampRange` - :param time_range: (Optional) The range of time within which cells - should be deleted. - """ - self._delete_cells( - column_family_id, [column], time_range=time_range, state=None - ) - - def delete_cells(self, column_family_id, columns, time_range=None): - """Deletes cells in this row. - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_delete_cells] - :end-before: [END bigtable_api_row_delete_cells] - :dedent: 4 - - :type column_family_id: str - :param column_family_id: The column family that contains the column - or columns with cells being deleted. Must be - of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type columns: :class:`list` of :class:`str` / - :func:`unicode `, or :class:`object` - :param columns: The columns within the column family that will have - cells deleted. If :attr:`ALL_COLUMNS` is used then - the entire column family will be deleted from the row. - - :type time_range: :class:`TimestampRange` - :param time_range: (Optional) The range of time within which cells - should be deleted. - """ - self._delete_cells(column_family_id, columns, time_range=time_range, state=None) - - def commit(self): - """Makes a ``MutateRow`` API request. - - If no mutations have been created in the row, no request is made. - - Mutations are applied atomically and in order, meaning that earlier - mutations can be masked / negated by later ones. Cells already present - in the row are left unchanged unless explicitly changed by a mutation. - - After committing the accumulated mutations, resets the local - mutations to an empty list. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_commit] - :end-before: [END bigtable_api_row_commit] - :dedent: 4 - - :rtype: :class:`~google.rpc.status_pb2.Status` - :returns: A response status (`google.rpc.status_pb2.Status`) - representing success or failure of the row committed. - :raises: :exc:`~.table.TooManyMutationsError` if the number of - mutations is greater than 100,000. 
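
The `DirectRow` methods documented above follow an accumulate-then-commit pattern: `set_cell`, `delete_cell`, and `delete_cells` only queue mutations locally, and `commit()` sends them in one `MutateRow` call. The sketch below ties those pieces together; it assumes a `table` object (a Bigtable `Table`) and a "stats_summary" column family already exist, and the import path assumes the module location this patch uses (`google/cloud/bigtable/row.py`).

```python
from google.cloud.bigtable.row import DirectRow

# `table` is assumed to be an existing Table instance obtained elsewhere.
row = DirectRow(b"phone#4c410523", table)

# Mutations only accumulate locally until commit() is called.
row.set_cell("stats_summary", b"connected_cell", 1)  # int -> 8-byte big-endian signed value
row.set_cell("stats_summary", b"os_build", b"PQ2A.190405.003")
row.delete_cell("stats_summary", b"stale_column")

print(row.get_mutations_size())  # approximate request payload size in bytes

status = row.commit()            # one MutateRow RPC; local mutations are cleared
if status.code != 0:             # google.rpc.status_pb2.Status, 0 means OK
    print("row mutation failed:", status.message)
```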
- """ - response = self._table.mutate_rows([self]) - - self.clear() - - return response[0] - - def clear(self): - """Removes all currently accumulated mutations on the current row. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_clear] - :end-before: [END bigtable_api_row_clear] - :dedent: 4 - """ - del self._pb_mutations[:] - - -class ConditionalRow(_SetDeleteRow): - """Google Cloud Bigtable Row for sending mutations conditionally. - - Each mutation has an associated state: :data:`True` or :data:`False`. - When :meth:`commit`-ed, the mutations for the :data:`True` - state will be applied if the filter matches any cells in - the row, otherwise the :data:`False` state will be applied. - - A :class:`ConditionalRow` accumulates mutations in the same way a - :class:`DirectRow` does: - - * :meth:`set_cell` - * :meth:`delete` - * :meth:`delete_cell` - * :meth:`delete_cells` - - with the only change the extra ``state`` parameter:: - - >>> row_cond = table.row(b'row-key2', filter_=row_filter) - >>> row_cond.set_cell(u'fam', b'col', b'cell-val', state=True) - >>> row_cond.delete_cell(u'fam', b'col', state=False) - - .. note:: - - As with :class:`DirectRow`, to actually send these mutations to the - Google Cloud Bigtable API, you must call :meth:`commit`. - - :type row_key: bytes - :param row_key: The key for the current row. - - :type table: :class:`Table ` - :param table: The table that owns the row. - - :type filter_: :class:`.RowFilter` - :param filter_: Filter to be used for conditional mutations. - """ - - def __init__(self, row_key, table, filter_): - super(ConditionalRow, self).__init__(row_key, table) - self._filter = filter_ - self._true_pb_mutations = [] - self._false_pb_mutations = [] - - def _get_mutations(self, state=None): - """Gets the list of mutations for a given state. - - Over-ridden so that the state can be used in: - - * :meth:`set_cell` - * :meth:`delete` - * :meth:`delete_cell` - * :meth:`delete_cells` - - :type state: bool - :param state: The state that the mutation should be - applied in. - - :rtype: list - :returns: The list to add new mutations to (for the current state). - """ - if state: - return self._true_pb_mutations - else: - return self._false_pb_mutations - - def commit(self): - """Makes a ``CheckAndMutateRow`` API request. - - If no mutations have been created in the row, no request is made. - - The mutations will be applied conditionally, based on whether the - filter matches any cells in the :class:`ConditionalRow` or not. (Each - method which adds a mutation has a ``state`` parameter for this - purpose.) - - Mutations are applied atomically and in order, meaning that earlier - mutations can be masked / negated by later ones. Cells already present - in the row are left unchanged unless explicitly changed by a mutation. - - After committing the accumulated mutations, resets the local - mutations. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_commit] - :end-before: [END bigtable_api_row_commit] - :dedent: 4 - - :rtype: bool - :returns: Flag indicating if the filter was matched (which also - indicates which set of mutations were applied by the server). - :raises: :class:`ValueError ` if the number of - mutations exceeds the :data:`MAX_MUTATIONS`. 
- """ - true_mutations = self._get_mutations(state=True) - false_mutations = self._get_mutations(state=False) - num_true_mutations = len(true_mutations) - num_false_mutations = len(false_mutations) - if num_true_mutations == 0 and num_false_mutations == 0: - return - if num_true_mutations > MAX_MUTATIONS or num_false_mutations > MAX_MUTATIONS: - raise ValueError( - "Exceed the maximum allowable mutations (%d). Had %s true " - "mutations and %d false mutations." - % (MAX_MUTATIONS, num_true_mutations, num_false_mutations) - ) - - data_client = self._table._instance._client.table_data_client - resp = data_client.check_and_mutate_row( - table_name=self._table.name, - row_key=self._row_key, - predicate_filter=self._filter.to_pb(), - app_profile_id=self._table._app_profile_id, - true_mutations=true_mutations, - false_mutations=false_mutations, - ) - self.clear() - return resp.predicate_matched - - # pylint: disable=arguments-differ - def set_cell(self, column_family_id, column, value, timestamp=None, state=True): - """Sets a value in this row. - - The cell is determined by the ``row_key`` of this - :class:`ConditionalRow` and the ``column``. The ``column`` must be in - an existing :class:`.ColumnFamily` (as determined by - ``column_family_id``). - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_set_cell] - :end-before: [END bigtable_api_row_set_cell] - :dedent: 4 - - :type column_family_id: str - :param column_family_id: The column family that contains the column. - Must be of the form - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type column: bytes - :param column: The column within the column family where the cell - is located. - - :type value: bytes or :class:`int` - :param value: The value to set in the cell. If an integer is used, - will be interpreted as a 64-bit big-endian signed - integer (8 bytes). - - :type timestamp: :class:`datetime.datetime` - :param timestamp: (Optional) The timestamp of the operation. - - :type state: bool - :param state: (Optional) The state that the mutation should be - applied in. Defaults to :data:`True`. - """ - self._set_cell( - column_family_id, column, value, timestamp=timestamp, state=state - ) - - def delete(self, state=True): - """Deletes this row from the table. - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_delete] - :end-before: [END bigtable_api_row_delete] - :dedent: 4 - - :type state: bool - :param state: (Optional) The state that the mutation should be - applied in. Defaults to :data:`True`. - """ - self._delete(state=state) - - def delete_cell(self, column_family_id, column, time_range=None, state=True): - """Deletes cell in this row. - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - For example: - - .. 
literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_delete_cell] - :end-before: [END bigtable_api_row_delete_cell] - :dedent: 4 - - :type column_family_id: str - :param column_family_id: The column family that contains the column - or columns with cells being deleted. Must be - of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type column: bytes - :param column: The column within the column family that will have a - cell deleted. - - :type time_range: :class:`TimestampRange` - :param time_range: (Optional) The range of time within which cells - should be deleted. - - :type state: bool - :param state: (Optional) The state that the mutation should be - applied in. Defaults to :data:`True`. - """ - self._delete_cells( - column_family_id, [column], time_range=time_range, state=state - ) - - def delete_cells(self, column_family_id, columns, time_range=None, state=True): - """Deletes cells in this row. - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_delete_cells] - :end-before: [END bigtable_api_row_delete_cells] - :dedent: 4 - - :type column_family_id: str - :param column_family_id: The column family that contains the column - or columns with cells being deleted. Must be - of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type columns: :class:`list` of :class:`str` / - :func:`unicode `, or :class:`object` - :param columns: The columns within the column family that will have - cells deleted. If :attr:`ALL_COLUMNS` is used then the - entire column family will be deleted from the row. - - :type time_range: :class:`TimestampRange` - :param time_range: (Optional) The range of time within which cells - should be deleted. - - :type state: bool - :param state: (Optional) The state that the mutation should be - applied in. Defaults to :data:`True`. - """ - self._delete_cells( - column_family_id, columns, time_range=time_range, state=state - ) - - # pylint: enable=arguments-differ - - def clear(self): - """Removes all currently accumulated mutations on the current row. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_clear] - :end-before: [END bigtable_api_row_clear] - :dedent: 4 - """ - del self._true_pb_mutations[:] - del self._false_pb_mutations[:] - - -class AppendRow(Row): - """Google Cloud Bigtable Row for sending append mutations. - - These mutations are intended to augment the value of an existing cell - and uses the methods: - - * :meth:`append_cell_value` - * :meth:`increment_cell_value` - - The first works by appending bytes and the second by incrementing an - integer (stored in the cell as 8 bytes). In either case, if the - cell is empty, assumes the default empty value (empty string for - bytes or 0 for integer). - - :type row_key: bytes - :param row_key: The key for the current row. - - :type table: :class:`Table ` - :param table: The table that owns the row. - """ - - def __init__(self, row_key, table): - super(AppendRow, self).__init__(row_key, table) - self._rule_pb_list = [] - - def clear(self): - """Removes all currently accumulated modifications on current row. - - For example: - - .. 
literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_clear] - :end-before: [END bigtable_api_row_clear] - :dedent: 4 - """ - del self._rule_pb_list[:] - - def append_cell_value(self, column_family_id, column, value): - """Appends a value to an existing cell. - - .. note:: - - This method adds a read-modify rule protobuf to the accumulated - read-modify rules on this row, but does not make an API - request. To actually send an API request (with the rules) to the - Google Cloud Bigtable API, call :meth:`commit`. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_append_cell_value] - :end-before: [END bigtable_api_row_append_cell_value] - :dedent: 4 - - :type column_family_id: str - :param column_family_id: The column family that contains the column. - Must be of the form - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type column: bytes - :param column: The column within the column family where the cell - is located. - - :type value: bytes - :param value: The value to append to the existing value in the cell. If - the targeted cell is unset, it will be treated as - containing the empty string. - """ - column = _to_bytes(column) - value = _to_bytes(value) - rule_pb = data_v2_pb2.ReadModifyWriteRule( - family_name=column_family_id, column_qualifier=column, append_value=value - ) - self._rule_pb_list.append(rule_pb) - - def increment_cell_value(self, column_family_id, column, int_value): - """Increments a value in an existing cell. - - Assumes the value in the cell is stored as a 64 bit integer - serialized to bytes. - - .. note:: - - This method adds a read-modify rule protobuf to the accumulated - read-modify rules on this row, but does not make an API - request. To actually send an API request (with the rules) to the - Google Cloud Bigtable API, call :meth:`commit`. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_increment_cell_value] - :end-before: [END bigtable_api_row_increment_cell_value] - :dedent: 4 - - :type column_family_id: str - :param column_family_id: The column family that contains the column. - Must be of the form - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type column: bytes - :param column: The column within the column family where the cell - is located. - - :type int_value: int - :param int_value: The value to increment the existing value in the cell - by. If the targeted cell is unset, it will be treated - as containing a zero. Otherwise, the targeted cell - must contain an 8-byte value (interpreted as a 64-bit - big-endian signed integer), or the entire request - will fail. - """ - column = _to_bytes(column) - rule_pb = data_v2_pb2.ReadModifyWriteRule( - family_name=column_family_id, - column_qualifier=column, - increment_amount=int_value, - ) - self._rule_pb_list.append(rule_pb) - - def commit(self): - """Makes a ``ReadModifyWriteRow`` API request. - - This commits modifications made by :meth:`append_cell_value` and - :meth:`increment_cell_value`. If no modifications were made, makes - no API request and just returns ``{}``. - - Modifies a row atomically, reading the latest existing - timestamp / value from the specified columns and writing a new value by - appending / incrementing. The new cell created uses either the current - server time or the highest timestamp of a cell in that column (if it - exceeds the server time). - - After committing the accumulated mutations, resets the local mutations. - - For example: - - .. 
literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_commit] - :end-before: [END bigtable_api_row_commit] - :dedent: 4 - - :rtype: dict - :returns: The new contents of all modified cells. Returned as a - dictionary of column families, each of which holds a - dictionary of columns. Each column contains a list of cells - modified. Each cell is represented with a two-tuple with the - value (in bytes) and the timestamp for the cell. - :raises: :class:`ValueError ` if the number of - mutations exceeds the :data:`MAX_MUTATIONS`. - """ - num_mutations = len(self._rule_pb_list) - if num_mutations == 0: - return {} - if num_mutations > MAX_MUTATIONS: - raise ValueError( - "%d total append mutations exceed the maximum " - "allowable %d." % (num_mutations, MAX_MUTATIONS) - ) - - data_client = self._table._instance._client.table_data_client - row_response = data_client.read_modify_write_row( - table_name=self._table.name, - row_key=self._row_key, - rules=self._rule_pb_list, - app_profile_id=self._table._app_profile_id, - ) - - # Reset modifications after commit-ing request. - self.clear() - - # NOTE: We expect row_response.key == self._row_key but don't check. - return _parse_rmw_row_response(row_response) - - -def _parse_rmw_row_response(row_response): - """Parses the response to a ``ReadModifyWriteRow`` request. - - :type row_response: :class:`.data_v2_pb2.Row` - :param row_response: The response row (with only modified cells) from a - ``ReadModifyWriteRow`` request. - - :rtype: dict - :returns: The new contents of all modified cells. Returned as a - dictionary of column families, each of which holds a - dictionary of columns. Each column contains a list of cells - modified. Each cell is represented with a two-tuple with the - value (in bytes) and the timestamp for the cell. For example: - - .. code:: python - - { - u'col-fam-id': { - b'col-name1': [ - (b'cell-val', datetime.datetime(...)), - (b'cell-val-newer', datetime.datetime(...)), - ], - b'col-name2': [ - (b'altcol-cell-val', datetime.datetime(...)), - ], - }, - u'col-fam-id2': { - b'col-name3-but-other-fam': [ - (b'foo', datetime.datetime(...)), - ], - }, - } - """ - result = {} - for column_family in row_response.row.families: - column_family_id, curr_family = _parse_family_pb(column_family) - result[column_family_id] = curr_family - return result - - -def _parse_family_pb(family_pb): - """Parses a Family protobuf into a dictionary. - - :type family_pb: :class:`._generated.data_pb2.Family` - :param family_pb: A protobuf - - :rtype: tuple - :returns: A string and dictionary. The string is the name of the - column family and the dictionary has column names (within the - family) as keys and cell lists as values. Each cell is - represented with a two-tuple with the value (in bytes) and the - timestamp for the cell. For example: - - .. code:: python - - { - b'col-name1': [ - (b'cell-val', datetime.datetime(...)), - (b'cell-val-newer', datetime.datetime(...)), - ], - b'col-name2': [ - (b'altcol-cell-val', datetime.datetime(...)), - ], - } - """ - result = {} - for column in family_pb.columns: - result[column.qualifier] = cells = [] - for cell in column.cells: - val_pair = (cell.value, _datetime_from_microseconds(cell.timestamp_micros)) - cells.append(val_pair) - - return family_pb.name, result - - -class PartialRowData(object): - """Representation of partial row in a Google Cloud Bigtable Table. 
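
Putting the `AppendRow` pieces above together: `append_cell_value` and `increment_cell_value` queue read-modify-write rules, and `commit()` returns the parsed `{family: {qualifier: [(value_bytes, datetime), ...]}}` structure described in `_parse_rmw_row_response`. A minimal sketch, assuming `table` and the "logs" / "metrics" families exist:

```python
from google.cloud.bigtable.row import AppendRow

append_row = AppendRow(b"counter#daily", table)  # `table` assumed to exist
append_row.append_cell_value("logs", b"visits", b",homepage")
append_row.increment_cell_value("metrics", b"view_count", 1)

# One ReadModifyWriteRow RPC; the server returns the new cell contents.
result = append_row.commit()
value_bytes, written_at = result["metrics"][b"view_count"][0]
view_count = int.from_bytes(value_bytes, byteorder="big", signed=True)
```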
- - These are expected to be updated directly from a - :class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse` - - :type row_key: bytes - :param row_key: The key for the row holding the (partial) data. - """ - - def __init__(self, row_key): - self._row_key = row_key - self._cells = {} - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return other._row_key == self._row_key and other._cells == self._cells - - def __ne__(self, other): - return not self == other - - def to_dict(self): - """Convert the cells to a dictionary. - - This is intended to be used with HappyBase, so the column family and - column qualiers are combined (with ``:``). - - :rtype: dict - :returns: Dictionary containing all the data in the cells of this row. - """ - result = {} - for column_family_id, columns in self._cells.items(): - for column_qual, cells in columns.items(): - key = _to_bytes(column_family_id) + b":" + _to_bytes(column_qual) - result[key] = cells - return result - - @property - def cells(self): - """Property returning all the cells accumulated on this partial row. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_data_cells] - :end-before: [END bigtable_api_row_data_cells] - :dedent: 4 - - :rtype: dict - :returns: Dictionary of the :class:`Cell` objects accumulated. This - dictionary has two-levels of keys (first for column families - and second for column names/qualifiers within a family). For - a given column, a list of :class:`Cell` objects is stored. - """ - return self._cells - - @property - def row_key(self): - """Getter for the current (partial) row's key. - - :rtype: bytes - :returns: The current (partial) row's key. - """ - return self._row_key - - def find_cells(self, column_family_id, column): - """Get a time series of cells stored on this instance. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_find_cells] - :end-before: [END bigtable_api_row_find_cells] - :dedent: 4 - - Args: - column_family_id (str): The ID of the column family. Must be of the - form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - column (bytes): The column within the column family where the cells - are located. - - Returns: - List[~google.cloud.bigtable.deprecated.row_data.Cell]: The cells stored in the - specified column. - - Raises: - KeyError: If ``column_family_id`` is not among the cells stored - in this row. - KeyError: If ``column`` is not among the cells stored in this row - for the given ``column_family_id``. - """ - try: - column_family = self._cells[column_family_id] - except KeyError: - raise KeyError(_MISSING_COLUMN_FAMILY.format(column_family_id)) - - try: - cells = column_family[column] - except KeyError: - raise KeyError(_MISSING_COLUMN.format(column, column_family_id)) - - return cells - - def cell_value(self, column_family_id, column, index=0): - """Get a single cell value stored on this instance. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_cell_value] - :end-before: [END bigtable_api_row_cell_value] - :dedent: 4 - - Args: - column_family_id (str): The ID of the column family. Must be of the - form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - column (bytes): The column within the column family where the cell - is located. - index (Optional[int]): The offset within the series of values. If - not specified, will return the first cell. 
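
A short sketch of the `PartialRowData` lookup helpers defined here. `partial_row` is assumed to be a `PartialRowData` instance, for example the object returned by a read such as `table.read_row(b"phone#4c410523")` (the read call itself is an assumption, not shown in this hunk):

```python
cells = partial_row.find_cells("stats_summary", b"os_build")
latest_value = partial_row.cell_value("stats_summary", b"os_build")  # cells[0].value

try:
    partial_row.find_cells("no_such_family", b"anything")
except KeyError as exc:
    # "Column family no_such_family is not among the cells stored in this row."
    print(exc)

# to_dict() flattens to HappyBase-style b"family:qualifier" keys.
flat = partial_row.to_dict()
print(flat[b"stats_summary:os_build"][0].value)
```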
- - Returns: - ~google.cloud.bigtable.deprecated.row_data.Cell value: The cell value stored - in the specified column and specified index. - - Raises: - KeyError: If ``column_family_id`` is not among the cells stored - in this row. - KeyError: If ``column`` is not among the cells stored in this row - for the given ``column_family_id``. - IndexError: If ``index`` cannot be found within the cells stored - in this row for the given ``column_family_id``, ``column`` - pair. - """ - cells = self.find_cells(column_family_id, column) - - try: - cell = cells[index] - except (TypeError, IndexError): - num_cells = len(cells) - msg = _MISSING_INDEX.format(index, column, column_family_id, num_cells) - raise IndexError(msg) - - return cell.value - - def cell_values(self, column_family_id, column, max_count=None): - """Get a time series of cells stored on this instance. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_cell_values] - :end-before: [END bigtable_api_row_cell_values] - :dedent: 4 - - Args: - column_family_id (str): The ID of the column family. Must be of the - form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - column (bytes): The column within the column family where the cells - are located. - max_count (int): The maximum number of cells to use. - - Returns: - A generator which provides: cell.value, cell.timestamp_micros - for each cell in the list of cells - - Raises: - KeyError: If ``column_family_id`` is not among the cells stored - in this row. - KeyError: If ``column`` is not among the cells stored in this row - for the given ``column_family_id``. - """ - cells = self.find_cells(column_family_id, column) - if max_count is None: - max_count = len(cells) - - for index, cell in enumerate(cells): - if index == max_count: - break - - yield cell.value, cell.timestamp_micros - - -class Cell(object): - """Representation of a Google Cloud Bigtable Cell. - - :type value: bytes - :param value: The value stored in the cell. - - :type timestamp_micros: int - :param timestamp_micros: The timestamp_micros when the cell was stored. - - :type labels: list - :param labels: (Optional) List of strings. Labels applied to the cell. - """ - - def __init__(self, value, timestamp_micros, labels=None): - self.value = value - self.timestamp_micros = timestamp_micros - self.labels = list(labels) if labels is not None else [] - - @classmethod - def from_pb(cls, cell_pb): - """Create a new cell from a Cell protobuf. - - :type cell_pb: :class:`._generated.data_pb2.Cell` - :param cell_pb: The protobuf to convert. - - :rtype: :class:`Cell` - :returns: The cell corresponding to the protobuf. 
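
Continuing the same hypothetical `partial_row`, this sketch shows the `Cell` attributes and the `cell_values()` generator described above; no new imports are needed because the cells come from the row object itself:

```python
cell = partial_row.find_cells("stats_summary", b"os_build")[0]
print(cell.value, cell.timestamp_micros, cell.labels)
print(cell.timestamp)  # timestamp_micros converted to a datetime
print(repr(cell))      # e.g. <Cell value=b'...' timestamp=...>

# cell_values() yields (value, timestamp_micros) pairs and stops after
# max_count entries when one is given.
for value, ts_micros in partial_row.cell_values(
    "stats_summary", b"os_build", max_count=5
):
    print(ts_micros, value)
```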
- """ - if cell_pb.labels: - return cls(cell_pb.value, cell_pb.timestamp_micros, labels=cell_pb.labels) - else: - return cls(cell_pb.value, cell_pb.timestamp_micros) - - @property - def timestamp(self): - return _datetime_from_microseconds(self.timestamp_micros) - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return ( - other.value == self.value - and other.timestamp_micros == self.timestamp_micros - and other.labels == self.labels - ) - - def __ne__(self, other): - return not self == other - - def __repr__(self): - return "<{name} value={value!r} timestamp={timestamp}>".format( - name=self.__class__.__name__, value=self.value, timestamp=self.timestamp - ) - - -class InvalidChunk(RuntimeError): - """Exception raised to invalid chunk data from back-end.""" diff --git a/google/cloud/bigtable/deprecated/encryption_info.py b/google/cloud/bigtable/encryption_info.py similarity index 93% rename from google/cloud/bigtable/deprecated/encryption_info.py rename to google/cloud/bigtable/encryption_info.py index daa0d9232..1757297bc 100644 --- a/google/cloud/bigtable/deprecated/encryption_info.py +++ b/google/cloud/bigtable/encryption_info.py @@ -14,7 +14,7 @@ """Class for encryption info for tables and backups.""" -from google.cloud.bigtable.deprecated.error import Status +from google.cloud.bigtable.error import Status class EncryptionInfo: @@ -27,7 +27,7 @@ class EncryptionInfo: :type encryption_type: int :param encryption_type: See :class:`enums.EncryptionInfo.EncryptionType` - :type encryption_status: google.cloud.bigtable.deprecated.encryption.Status + :type encryption_status: google.cloud.bigtable.encryption.Status :param encryption_status: The encryption status. :type kms_key_version: str diff --git a/google/cloud/bigtable/deprecated/enums.py b/google/cloud/bigtable/enums.py similarity index 100% rename from google/cloud/bigtable/deprecated/enums.py rename to google/cloud/bigtable/enums.py diff --git a/google/cloud/bigtable/deprecated/error.py b/google/cloud/bigtable/error.py similarity index 100% rename from google/cloud/bigtable/deprecated/error.py rename to google/cloud/bigtable/error.py diff --git a/google/cloud/bigtable/gapic_version.py b/google/cloud/bigtable/gapic_version.py index 8d4f4cfb6..0f1a446f3 100644 --- a/google/cloud/bigtable/gapic_version.py +++ b/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.17.0" # {x-release-please-version} +__version__ = "2.19.0" # {x-release-please-version} diff --git a/google/cloud/bigtable/deprecated/instance.py b/google/cloud/bigtable/instance.py similarity index 91% rename from google/cloud/bigtable/deprecated/instance.py rename to google/cloud/bigtable/instance.py index 33475d261..6d092cefd 100644 --- a/google/cloud/bigtable/deprecated/instance.py +++ b/google/cloud/bigtable/instance.py @@ -16,9 +16,9 @@ import re -from google.cloud.bigtable.deprecated.app_profile import AppProfile -from google.cloud.bigtable.deprecated.cluster import Cluster -from google.cloud.bigtable.deprecated.table import Table +from google.cloud.bigtable.app_profile import AppProfile +from google.cloud.bigtable.cluster import Cluster +from google.cloud.bigtable.table import Table from google.protobuf import field_mask_pb2 @@ -28,7 +28,7 @@ from google.api_core.exceptions import NotFound -from google.cloud.bigtable.deprecated.policy import Policy +from google.cloud.bigtable.policy import Policy import warnings @@ -61,7 +61,7 @@ class Instance(object): :type instance_id: str :param instance_id: The ID of the instance. - :type client: :class:`Client ` + :type client: :class:`Client ` :param client: The client that owns the instance. Provides authorization and a project ID. @@ -75,10 +75,10 @@ class Instance(object): :param instance_type: (Optional) The type of the instance. Possible values are represented by the following constants: - :data:`google.cloud.bigtable.deprecated.enums.Instance.Type.PRODUCTION`. - :data:`google.cloud.bigtable.deprecated.enums.Instance.Type.DEVELOPMENT`, + :data:`google.cloud.bigtable.enums.Instance.Type.PRODUCTION`. + :data:`google.cloud.bigtable.enums.Instance.Type.DEVELOPMENT`, Defaults to - :data:`google.cloud.bigtable.deprecated.enums.Instance.Type.UNSPECIFIED`. + :data:`google.cloud.bigtable.enums.Instance.Type.UNSPECIFIED`. :type labels: dict :param labels: (Optional) Labels are a flexible and lightweight @@ -95,9 +95,9 @@ class Instance(object): :param _state: (`OutputOnly`) The current state of the instance. Possible values are represented by the following constants: - :data:`google.cloud.bigtable.deprecated.enums.Instance.State.STATE_NOT_KNOWN`. - :data:`google.cloud.bigtable.deprecated.enums.Instance.State.READY`. - :data:`google.cloud.bigtable.deprecated.enums.Instance.State.CREATING`. + :data:`google.cloud.bigtable.enums.Instance.State.STATE_NOT_KNOWN`. + :data:`google.cloud.bigtable.enums.Instance.State.READY`. + :data:`google.cloud.bigtable.enums.Instance.State.CREATING`. """ def __init__( @@ -141,7 +141,7 @@ def from_pb(cls, instance_pb, client): :type instance_pb: :class:`instance.Instance` :param instance_pb: An instance protobuf object. - :type client: :class:`Client ` + :type client: :class:`Client ` :param client: The client that owns the instance. :rtype: :class:`Instance` @@ -196,7 +196,7 @@ def name(self): @property def state(self): - """google.cloud.bigtable.deprecated.enums.Instance.State: state of Instance. + """google.cloud.bigtable.enums.Instance.State: state of Instance. For example: @@ -272,12 +272,12 @@ def create( persisting Bigtable data. Possible values are represented by the following constants: - :data:`google.cloud.bigtable.deprecated.enums.StorageType.SSD`. - :data:`google.cloud.bigtable.deprecated.enums.StorageType.HDD`, + :data:`google.cloud.bigtable.enums.StorageType.SSD`. 
+ :data:`google.cloud.bigtable.enums.StorageType.HDD`, Defaults to - :data:`google.cloud.bigtable.deprecated.enums.StorageType.UNSPECIFIED`. + :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`. - :type clusters: class:`~[~google.cloud.bigtable.deprecated.cluster.Cluster]` + :type clusters: class:`~[~google.cloud.bigtable.cluster.Cluster]` :param clusters: List of clusters to be created. :rtype: :class:`~google.api_core.operation.Operation` @@ -478,7 +478,7 @@ def get_iam_policy(self, requested_policy_version=None): than the one that was requested, based on the feature syntax in the policy fetched. - :rtype: :class:`google.cloud.bigtable.deprecated.policy.Policy` + :rtype: :class:`google.cloud.bigtable.policy.Policy` :returns: The current IAM policy of this instance """ args = {"resource": self.name} @@ -497,7 +497,7 @@ def set_iam_policy(self, policy): existing policy. For more information about policy, please see documentation of - class `google.cloud.bigtable.deprecated.policy.Policy` + class `google.cloud.bigtable.policy.Policy` For example: @@ -506,11 +506,11 @@ class `google.cloud.bigtable.deprecated.policy.Policy` :end-before: [END bigtable_api_set_iam_policy] :dedent: 4 - :type policy: :class:`google.cloud.bigtable.deprecated.policy.Policy` + :type policy: :class:`google.cloud.bigtable.policy.Policy` :param policy: A new IAM policy to replace the current IAM policy of this instance - :rtype: :class:`google.cloud.bigtable.deprecated.policy.Policy` + :rtype: :class:`google.cloud.bigtable.policy.Policy` :returns: The current IAM policy of this instance. """ instance_admin_client = self._client.instance_admin_client @@ -586,12 +586,12 @@ def cluster( :param default_storage_type: (Optional) The type of storage Possible values are represented by the following constants: - :data:`google.cloud.bigtable.deprecated.enums.StorageType.SSD`. - :data:`google.cloud.bigtable.deprecated.enums.StorageType.HDD`, + :data:`google.cloud.bigtable.enums.StorageType.SSD`. + :data:`google.cloud.bigtable.enums.StorageType.HDD`, Defaults to - :data:`google.cloud.bigtable.deprecated.enums.StorageType.UNSPECIFIED`. + :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`. - :rtype: :class:`~google.cloud.bigtable.deprecated.instance.Cluster` + :rtype: :class:`~google.cloud.bigtable.instance.Cluster` :returns: a cluster owned by this instance. :type kms_key_name: str @@ -635,7 +635,7 @@ def list_clusters(self): :rtype: tuple :returns: (clusters, failed_locations), where 'clusters' is list of - :class:`google.cloud.bigtable.deprecated.instance.Cluster`, and + :class:`google.cloud.bigtable.instance.Cluster`, and 'failed_locations' is a list of locations which could not be resolved. """ @@ -664,7 +664,7 @@ def table(self, table_id, mutation_timeout=None, app_profile_id=None): :type app_profile_id: str :param app_profile_id: (Optional) The unique name of the AppProfile. - :rtype: :class:`Table ` + :rtype: :class:`Table ` :returns: The table owned by this instance. """ return Table( @@ -684,7 +684,7 @@ def list_tables(self): :end-before: [END bigtable_api_list_tables] :dedent: 4 - :rtype: list of :class:`Table ` + :rtype: list of :class:`Table ` :returns: The list of tables owned by the instance. :raises: :class:`ValueError ` if one of the returned tables has a name that is not of the expected format. @@ -731,8 +731,8 @@ def app_profile( :param: routing_policy_type: The type of the routing policy. 
Possible values are represented by the following constants: - :data:`google.cloud.bigtable.deprecated.enums.RoutingPolicyType.ANY` - :data:`google.cloud.bigtable.deprecated.enums.RoutingPolicyType.SINGLE` + :data:`google.cloud.bigtable.enums.RoutingPolicyType.ANY` + :data:`google.cloud.bigtable.enums.RoutingPolicyType.SINGLE` :type: description: str :param: description: (Optional) Long form description of the use @@ -753,7 +753,7 @@ def app_profile( transactional writes for ROUTING_POLICY_TYPE_SINGLE. - :rtype: :class:`~google.cloud.bigtable.deprecated.app_profile.AppProfile>` + :rtype: :class:`~google.cloud.bigtable.app_profile.AppProfile>` :returns: AppProfile for this instance. """ return AppProfile( @@ -776,10 +776,10 @@ def list_app_profiles(self): :end-before: [END bigtable_api_list_app_profiles] :dedent: 4 - :rtype: :list:[`~google.cloud.bigtable.deprecated.app_profile.AppProfile`] - :returns: A :list:[`~google.cloud.bigtable.deprecated.app_profile.AppProfile`]. + :rtype: :list:[`~google.cloud.bigtable.app_profile.AppProfile`] + :returns: A :list:[`~google.cloud.bigtable.app_profile.AppProfile`]. By default, this is a list of - :class:`~google.cloud.bigtable.deprecated.app_profile.AppProfile` + :class:`~google.cloud.bigtable.app_profile.AppProfile` instances. """ resp = self._client.instance_admin_client.list_app_profiles( diff --git a/google/cloud/bigtable/iterators.py b/google/cloud/bigtable/iterators.py deleted file mode 100644 index b20932fb2..000000000 --- a/google/cloud/bigtable/iterators.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import AsyncIterable - -import asyncio -import time -import sys - -from google.cloud.bigtable._read_rows import _ReadRowsOperation -from google.cloud.bigtable.exceptions import IdleTimeout -from google.cloud.bigtable._helpers import _convert_retry_deadline -from google.cloud.bigtable.row import Row - - -class ReadRowsIterator(AsyncIterable[Row]): - """ - Async iterator for ReadRows responses. - """ - - def __init__(self, merger: _ReadRowsOperation): - self._merger: _ReadRowsOperation = merger - self._error: Exception | None = None - self.last_interaction_time = time.time() - self._idle_timeout_task: asyncio.Task[None] | None = None - # wrap merger with a wrapper that properly formats exceptions - self._next_fn = _convert_retry_deadline( - self._merger.__anext__, - self._merger.operation_timeout, - self._merger.transient_errors, - ) - - async def _start_idle_timer(self, idle_timeout: float): - """ - Start a coroutine that will cancel a stream if no interaction - with the iterator occurs for the specified number of seconds. - - Subsequent access to the iterator will raise an IdleTimeout exception. 
- - Args: - - idle_timeout: number of seconds of inactivity before cancelling the stream - """ - self.last_interaction_time = time.time() - if self._idle_timeout_task is not None: - self._idle_timeout_task.cancel() - self._idle_timeout_task = asyncio.create_task( - self._idle_timeout_coroutine(idle_timeout) - ) - if sys.version_info >= (3, 8): - self._idle_timeout_task.name = "ReadRowsIterator._idle_timeout" - - @property - def active(self): - """ - Returns True if the iterator is still active and has not been closed - """ - return self._error is None - - async def _idle_timeout_coroutine(self, idle_timeout: float): - """ - Coroutine that will cancel a stream if no interaction with the iterator - in the last `idle_timeout` seconds. - """ - while self.active: - next_timeout = self.last_interaction_time + idle_timeout - await asyncio.sleep(next_timeout - time.time()) - if self.last_interaction_time + idle_timeout < time.time() and self.active: - # idle timeout has expired - await self._finish_with_error( - IdleTimeout( - ( - "Timed out waiting for next Row to be consumed. " - f"(idle_timeout={idle_timeout:0.1f}s)" - ) - ) - ) - - def __aiter__(self): - """Implement the async iterator protocol.""" - return self - - async def __anext__(self) -> Row: - """ - Implement the async iterator potocol. - - Return the next item in the stream if active, or - raise an exception if the stream has been closed. - """ - if self._error is not None: - raise self._error - try: - self.last_interaction_time = time.time() - return await self._next_fn() - except Exception as e: - await self._finish_with_error(e) - raise e - - async def _finish_with_error(self, e: Exception): - """ - Helper function to close the stream and clean up resources - after an error has occurred. - """ - if self.active: - await self._merger.aclose() - self._error = e - if self._idle_timeout_task is not None: - self._idle_timeout_task.cancel() - self._idle_timeout_task = None - - async def aclose(self): - """ - Support closing the stream with an explicit call to aclose() - """ - await self._finish_with_error( - StopAsyncIteration(f"{self.__class__.__name__} closed") - ) diff --git a/google/cloud/bigtable/deprecated/policy.py b/google/cloud/bigtable/policy.py similarity index 100% rename from google/cloud/bigtable/deprecated/policy.py rename to google/cloud/bigtable/policy.py diff --git a/google/cloud/bigtable/row.py b/google/cloud/bigtable/row.py index 5fdc1b365..752458a08 100644 --- a/google/cloud/bigtable/row.py +++ b/google/cloud/bigtable/row.py @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,455 +11,1257 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -from __future__ import annotations -from collections import OrderedDict -from typing import Sequence, Generator, overload, Any -from functools import total_ordering +"""User-friendly container for Google Cloud Bigtable Row.""" -from google.cloud.bigtable_v2.types import Row as RowPB -# Type aliases used internally for readability. 
-_family_type = str -_qualifier_type = bytes +import struct +from google.cloud._helpers import _datetime_from_microseconds # type: ignore +from google.cloud._helpers import _microseconds_from_datetime # type: ignore +from google.cloud._helpers import _to_bytes # type: ignore +from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + +_PACK_I64 = struct.Struct(">q").pack + +MAX_MUTATIONS = 100000 +"""The maximum number of mutations that a row can accumulate.""" + +_MISSING_COLUMN_FAMILY = "Column family {} is not among the cells stored in this row." +_MISSING_COLUMN = ( + "Column {} is not among the cells stored in this row in the column family {}." +) +_MISSING_INDEX = ( + "Index {!r} is not valid for the cells stored in this row for column {} " + "in the column family {}. There are {} such cells." +) -class Row(Sequence["Cell"]): - """ - Model class for row data returned from server - Does not represent all data contained in the row, only data returned by a - query. - Expected to be read-only to users, and written by backend +class Row(object): + """Base representation of a Google Cloud Bigtable Row. - Can be indexed: - cells = row["family", "qualifier"] + This class has three subclasses corresponding to the three + RPC methods for sending row mutations: + + * :class:`DirectRow` for ``MutateRow`` + * :class:`ConditionalRow` for ``CheckAndMutateRow`` + * :class:`AppendRow` for ``ReadModifyWriteRow`` + + :type row_key: bytes + :param row_key: The key for the current row. + + :type table: :class:`Table ` + :param table: (Optional) The table that owns the row. """ - __slots__ = ("row_key", "cells", "_index_data") + def __init__(self, row_key, table=None): + self._row_key = _to_bytes(row_key) + self._table = table - def __init__( - self, - key: bytes, - cells: list[Cell], - ): - """ - Initializes a Row object + @property + def row_key(self): + """Row key. - Row objects are not intended to be created by users. - They are returned by the Bigtable backend. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_row_key] + :end-before: [END bigtable_api_row_row_key] + :dedent: 4 + + :rtype: bytes + :returns: The key for the current row. """ - self.row_key = key - self.cells: list[Cell] = cells - # index is lazily created when needed - self._index_data: OrderedDict[ - _family_type, OrderedDict[_qualifier_type, list[Cell]] - ] | None = None + return self._row_key @property - def _index( - self, - ) -> OrderedDict[_family_type, OrderedDict[_qualifier_type, list[Cell]]]: - """ - Returns an index of cells associated with each family and qualifier. + def table(self): + """Row table. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_table] + :end-before: [END bigtable_api_row_table] + :dedent: 4 - The index is lazily created when needed + :rtype: table: :class:`Table ` + :returns: table: The table that owns the row. """ - if self._index_data is None: - self._index_data = OrderedDict() - for cell in self.cells: - self._index_data.setdefault(cell.family, OrderedDict()).setdefault( - cell.qualifier, [] - ).append(cell) - return self._index_data + return self._table - @classmethod - def _from_pb(cls, row_pb: RowPB) -> Row: - """ - Creates a row from a protobuf representation - - Row objects are not intended to be created by users. - They are returned by the Bigtable backend. 
- """ - row_key: bytes = row_pb.key - cell_list: list[Cell] = [] - for family in row_pb.families: - for column in family.columns: - for cell in column.cells: - new_cell = Cell( - value=cell.value, - row_key=row_key, - family=family.name, - qualifier=column.qualifier, - timestamp_micros=cell.timestamp_micros, - labels=list(cell.labels) if cell.labels else None, - ) - cell_list.append(new_cell) - return cls(row_key, cells=cell_list) - - def get_cells( - self, family: str | None = None, qualifier: str | bytes | None = None - ) -> list[Cell]: - """ - Returns cells sorted in Bigtable native order: - - Family lexicographically ascending - - Qualifier ascending - - Timestamp in reverse chronological order - - If family or qualifier not passed, will include all - - Can also be accessed through indexing: - cells = row["family", "qualifier"] - cells = row["family"] - """ - if family is None: - if qualifier is not None: - # get_cells(None, "qualifier") is not allowed - raise ValueError("Qualifier passed without family") - else: - # return all cells on get_cells() - return self.cells - if qualifier is None: - # return all cells in family on get_cells(family) - return list(self._get_all_from_family(family)) - if isinstance(qualifier, str): - qualifier = qualifier.encode("utf-8") - # return cells in family and qualifier on get_cells(family, qualifier) - if family not in self._index: - raise ValueError(f"Family '{family}' not found in row '{self.row_key!r}'") - if qualifier not in self._index[family]: - raise ValueError( - f"Qualifier '{qualifier!r}' not found in family '{family}' in row '{self.row_key!r}'" - ) - return self._index[family][qualifier] - def _get_all_from_family(self, family: str) -> Generator[Cell, None, None]: +class _SetDeleteRow(Row): + """Row helper for setting or deleting cell values. + + Implements helper methods to add mutations to set or delete cell contents: + + * :meth:`set_cell` + * :meth:`delete` + * :meth:`delete_cell` + * :meth:`delete_cells` + + :type row_key: bytes + :param row_key: The key for the current row. + + :type table: :class:`Table ` + :param table: The table that owns the row. + """ + + ALL_COLUMNS = object() + """Sentinel value used to indicate all columns in a column family.""" + + def _get_mutations(self, state=None): + """Gets the list of mutations for a given state. + + This method intended to be implemented by subclasses. + + ``state`` may not need to be used by all subclasses. + + :type state: bool + :param state: The state that the mutation should be + applied in. + + :raises: :class:`NotImplementedError ` + always. """ - Returns all cells in the row for the family_id + raise NotImplementedError + + def _set_cell(self, column_family_id, column, value, timestamp=None, state=None): + """Helper for :meth:`set_cell` + + Adds a mutation to set the value in a specific cell. + + ``state`` is unused by :class:`DirectRow` but is used by + subclasses. + + :type column_family_id: str + :param column_family_id: The column family that contains the column. + Must be of the form + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type column: bytes + :param column: The column within the column family where the cell + is located. + + :type value: bytes or :class:`int` + :param value: The value to set in the cell. If an integer is used, + will be interpreted as a 64-bit big-endian signed + integer (8 bytes). + + :type timestamp: :class:`datetime.datetime` + :param timestamp: (Optional) The timestamp of the operation. 
+ + :type state: bool + :param state: (Optional) The state that is passed along to + :meth:`_get_mutations`. """ - if family not in self._index: - raise ValueError(f"Family '{family}' not found in row '{self.row_key!r}'") - for qualifier in self._index[family]: - yield from self._index[family][qualifier] + column = _to_bytes(column) + if isinstance(value, int): + value = _PACK_I64(value) + value = _to_bytes(value) + if timestamp is None: + # Use -1 for current Bigtable server time. + timestamp_micros = -1 + else: + timestamp_micros = _microseconds_from_datetime(timestamp) + # Truncate to millisecond granularity. + timestamp_micros -= timestamp_micros % 1000 + + mutation_val = data_v2_pb2.Mutation.SetCell( + family_name=column_family_id, + column_qualifier=column, + timestamp_micros=timestamp_micros, + value=value, + ) + mutation_pb = data_v2_pb2.Mutation(set_cell=mutation_val) + self._get_mutations(state).append(mutation_pb) - def __str__(self) -> str: + def _delete(self, state=None): + """Helper for :meth:`delete` + + Adds a delete mutation (for the entire row) to the accumulated + mutations. + + ``state`` is unused by :class:`DirectRow` but is used by + subclasses. + + :type state: bool + :param state: (Optional) The state that is passed along to + :meth:`_get_mutations`. """ - Human-readable string representation + mutation_val = data_v2_pb2.Mutation.DeleteFromRow() + mutation_pb = data_v2_pb2.Mutation(delete_from_row=mutation_val) + self._get_mutations(state).append(mutation_pb) + + def _delete_cells(self, column_family_id, columns, time_range=None, state=None): + """Helper for :meth:`delete_cell` and :meth:`delete_cells`. + + ``state`` is unused by :class:`DirectRow` but is used by + subclasses. - { - (family='fam', qualifier=b'col'): [b'value', (+1 more),], - (family='fam', qualifier=b'col2'): [b'other'], - } + :type column_family_id: str + :param column_family_id: The column family that contains the column + or columns with cells being deleted. Must be + of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type columns: :class:`list` of :class:`str` / + :func:`unicode `, or :class:`object` + :param columns: The columns within the column family that will have + cells deleted. If :attr:`ALL_COLUMNS` is used then + the entire column family will be deleted from the row. + + :type time_range: :class:`TimestampRange` + :param time_range: (Optional) The range of time within which cells + should be deleted. + + :type state: bool + :param state: (Optional) The state that is passed along to + :meth:`_get_mutations`. 
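
The `_delete_cells` helper documented here distinguishes two shapes of delete: a list of qualifiers becomes one `DeleteFromColumn` mutation per column, while the `ALL_COLUMNS` sentinel becomes a single `DeleteFromFamily` mutation. A minimal sketch through the public `DirectRow` API, assuming `table` exists and using the import path this patch establishes:

```python
from google.cloud.bigtable.row import DirectRow

row = DirectRow(b"device#sensor-1", table)  # `table` assumed to exist

# Named qualifiers: one DeleteFromColumn mutation per column; an optional
# time_range argument would limit which timestamps are removed.
row.delete_cells("readings", [b"temp_f", b"temp_c"])

# ALL_COLUMNS sentinel: a single DeleteFromFamily mutation for the whole family.
row.delete_cells("debug_logs", DirectRow.ALL_COLUMNS)

row.commit()
```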
""" - output = ["{"] - for family, qualifier in self.get_column_components(): - cell_list = self[family, qualifier] - line = [f" (family={family!r}, qualifier={qualifier!r}): "] - if len(cell_list) == 0: - line.append("[],") - elif len(cell_list) == 1: - line.append(f"[{cell_list[0]}],") - else: - line.append(f"[{cell_list[0]}, (+{len(cell_list)-1} more)],") - output.append("".join(line)) - output.append("}") - return "\n".join(output) + mutations_list = self._get_mutations(state) + if columns is self.ALL_COLUMNS: + mutation_val = data_v2_pb2.Mutation.DeleteFromFamily( + family_name=column_family_id + ) + mutation_pb = data_v2_pb2.Mutation(delete_from_family=mutation_val) + mutations_list.append(mutation_pb) + else: + delete_kwargs = {} + if time_range is not None: + delete_kwargs["time_range"] = time_range.to_pb() - def __repr__(self): - cell_str_buffer = ["{"] - for family, qualifier in self.get_column_components(): - cell_list = self[family, qualifier] - repr_list = [cell.to_dict() for cell in cell_list] - cell_str_buffer.append(f" ('{family}', {qualifier!r}): {repr_list},") - cell_str_buffer.append("}") - cell_str = "\n".join(cell_str_buffer) - output = f"Row(key={self.row_key!r}, cells={cell_str})" - return output - - def to_dict(self) -> dict[str, Any]: - """ - Returns a dictionary representation of the cell in the Bigtable Row - proto format - - https://cloud.google.com/bigtable/docs/reference/data/rpc/google.bigtable.v2#row - """ - family_list = [] - for family_name, qualifier_dict in self._index.items(): - qualifier_list = [] - for qualifier_name, cell_list in qualifier_dict.items(): - cell_dicts = [cell.to_dict() for cell in cell_list] - qualifier_list.append( - {"qualifier": qualifier_name, "cells": cell_dicts} + to_append = [] + for column in columns: + column = _to_bytes(column) + # time_range will never change if present, but the rest of + # delete_kwargs will + delete_kwargs.update( + family_name=column_family_id, column_qualifier=column ) - family_list.append({"name": family_name, "columns": qualifier_list}) - return {"key": self.row_key, "families": family_list} - - # Sequence and Mapping methods - def __iter__(self): - """ - Allow iterating over all cells in the row - """ - return iter(self.cells) - - def __contains__(self, item): - """ - Implements `in` operator - - Works for both cells in the internal list, and `family` or - `(family, qualifier)` pairs associated with the cells - """ - if isinstance(item, _family_type): - return item in self._index - elif ( - isinstance(item, tuple) - and isinstance(item[0], _family_type) - and isinstance(item[1], (bytes, str)) - ): - q = item[1] if isinstance(item[1], bytes) else item[1].encode("utf-8") - return item[0] in self._index and q in self._index[item[0]] - # check if Cell is in Row - return item in self.cells - - @overload - def __getitem__( - self, - index: str | tuple[str, bytes | str], - ) -> list[Cell]: - # overload signature for type checking - pass - - @overload - def __getitem__(self, index: int) -> Cell: - # overload signature for type checking - pass - - @overload - def __getitem__(self, index: slice) -> list[Cell]: - # overload signature for type checking - pass - - def __getitem__(self, index): - """ - Implements [] indexing - - Supports indexing by family, (family, qualifier) pair, - numerical index, and index slicing - """ - if isinstance(index, _family_type): - return self.get_cells(family=index) - elif ( - isinstance(index, tuple) - and isinstance(index[0], _family_type) - and isinstance(index[1], (bytes, str)) 
- ): - return self.get_cells(family=index[0], qualifier=index[1]) - elif isinstance(index, int) or isinstance(index, slice): - # index is int or slice - return self.cells[index] - else: - raise TypeError( - "Index must be family_id, (family_id, qualifier), int, or slice" - ) + mutation_val = data_v2_pb2.Mutation.DeleteFromColumn(**delete_kwargs) + mutation_pb = data_v2_pb2.Mutation(delete_from_column=mutation_val) + to_append.append(mutation_pb) + + # We don't add the mutations until all columns have been + # processed without error. + mutations_list.extend(to_append) + + +class DirectRow(_SetDeleteRow): + """Google Cloud Bigtable Row for sending "direct" mutations. + + These mutations directly set or delete cell contents: + + * :meth:`set_cell` + * :meth:`delete` + * :meth:`delete_cell` + * :meth:`delete_cells` + + These methods can be used directly:: + + >>> row = table.row(b'row-key1') + >>> row.set_cell(u'fam', b'col1', b'cell-val') + >>> row.delete_cell(u'fam', b'col2') - def __len__(self): + .. note:: + + A :class:`DirectRow` accumulates mutations locally via the + :meth:`set_cell`, :meth:`delete`, :meth:`delete_cell` and + :meth:`delete_cells` methods. To actually send these mutations to the + Google Cloud Bigtable API, you must call :meth:`commit`. + + :type row_key: bytes + :param row_key: The key for the current row. + + :type table: :class:`Table <google.cloud.bigtable.table.Table>` + :param table: (Optional) The table that owns the row. This is + used for :meth:`commit` only. Alternatively, + DirectRows can be persisted via + :meth:`~google.cloud.bigtable.table.Table.mutate_rows`. + """ + + def __init__(self, row_key, table=None): + super(DirectRow, self).__init__(row_key, table) + self._pb_mutations = [] + + def _get_mutations(self, state=None): # pylint: disable=unused-argument + """Gets the list of mutations for a given state. + + ``state`` is unused by :class:`DirectRow` but is used by + subclasses. + + :type state: bool + :param state: The state that the mutation should be + applied in. + + :rtype: list + :returns: The list to add new mutations to (for the current state). """ - Implements `len()` operator + return self._pb_mutations + + def get_mutations_size(self): + """Gets the total mutations size for the current row. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_get_mutations_size] + :end-before: [END bigtable_api_row_get_mutations_size] + :dedent: 4 """ - return len(self.cells) - def get_column_components(self) -> list[tuple[str, bytes]]: + mutation_size = 0 + for mutation in self._get_mutations(): + mutation_size += mutation._pb.ByteSize() + + return mutation_size + + def set_cell(self, column_family_id, column, value, timestamp=None): + """Sets a value in this row. + + The cell is determined by the ``row_key`` of this :class:`DirectRow` + and the ``column``. The ``column`` must be in an existing + :class:`.ColumnFamily` (as determined by ``column_family_id``). + + .. note:: + + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_set_cell] + :end-before: [END bigtable_api_row_set_cell] + :dedent: 4 + + :type column_family_id: str + :param column_family_id: The column family that contains the column. + Must be of the form + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+ + :type column: bytes + :param column: The column within the column family where the cell + is located. + + :type value: bytes or :class:`int` + :param value: The value to set in the cell. If an integer is used, + will be interpreted as a 64-bit big-endian signed + integer (8 bytes). + + :type timestamp: :class:`datetime.datetime` + :param timestamp: (Optional) The timestamp of the operation. """ - Returns a list of (family, qualifier) pairs associated with the cells + self._set_cell(column_family_id, column, value, timestamp=timestamp, state=None) + + def delete(self): + """Deletes this row from the table. + + .. note:: - Pairs can be used for indexing + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_delete] + :end-before: [END bigtable_api_row_delete] + :dedent: 4 """ - return [(f, q) for f in self._index for q in self._index[f]] + self._delete(state=None) - def __eq__(self, other): + def delete_cell(self, column_family_id, column, time_range=None): + """Deletes cell in this row. + + .. note:: + + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_delete_cell] + :end-before: [END bigtable_api_row_delete_cell] + :dedent: 4 + + :type column_family_id: str + :param column_family_id: The column family that contains the column + or columns with cells being deleted. Must be + of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type column: bytes + :param column: The column within the column family that will have a + cell deleted. + + :type time_range: :class:`TimestampRange` + :param time_range: (Optional) The range of time within which cells + should be deleted. """ - Implements `==` operator - """ - # for performance reasons, check row metadata - # before checking individual cells - if not isinstance(other, Row): - return False - if self.row_key != other.row_key: - return False - if len(self.cells) != len(other.cells): - return False - components = self.get_column_components() - other_components = other.get_column_components() - if len(components) != len(other_components): - return False - if components != other_components: - return False - for family, qualifier in components: - if len(self[family, qualifier]) != len(other[family, qualifier]): - return False - # compare individual cell lists - if self.cells != other.cells: - return False - return True - - def __ne__(self, other) -> bool: - """ - Implements `!=` operator + self._delete_cells( + column_family_id, [column], time_range=time_range, state=None + ) + + def delete_cells(self, column_family_id, columns, time_range=None): + """Deletes cells in this row. + + .. note:: + + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + For example: + + .. 
literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_delete_cells] + :end-before: [END bigtable_api_row_delete_cells] + :dedent: 4 + + :type column_family_id: str + :param column_family_id: The column family that contains the column + or columns with cells being deleted. Must be + of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type columns: :class:`list` of :class:`str` / + :func:`unicode `, or :class:`object` + :param columns: The columns within the column family that will have + cells deleted. If :attr:`ALL_COLUMNS` is used then + the entire column family will be deleted from the row. + + :type time_range: :class:`TimestampRange` + :param time_range: (Optional) The range of time within which cells + should be deleted. """ - return not self == other + self._delete_cells(column_family_id, columns, time_range=time_range, state=None) + def commit(self): + """Makes a ``MutateRow`` API request. -class _LastScannedRow(Row): - """A value used to indicate a scanned row that is not returned as part of - a query. + If no mutations have been created in the row, no request is made. - This is used internally to indicate progress in a scan, and improve retry - performance. It is not intended to be used directly by users. - """ + Mutations are applied atomically and in order, meaning that earlier + mutations can be masked / negated by later ones. Cells already present + in the row are left unchanged unless explicitly changed by a mutation. - def __init__(self, row_key): - super().__init__(row_key, []) + After committing the accumulated mutations, resets the local + mutations to an empty list. - def __eq__(self, other): - return isinstance(other, _LastScannedRow) + For example: + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_commit] + :end-before: [END bigtable_api_row_commit] + :dedent: 4 -@total_ordering -class Cell: - """ - Model class for cell data + :rtype: :class:`~google.rpc.status_pb2.Status` + :returns: A response status (`google.rpc.status_pb2.Status`) + representing success or failure of the row committed. + :raises: :exc:`~.table.TooManyMutationsError` if the number of + mutations is greater than 100,000. + """ + response = self._table.mutate_rows([self]) - Does not represent all data contained in the cell, only data returned by a - query. - Expected to be read-only to users, and written by backend - """ + self.clear() + + return response[0] + + def clear(self): + """Removes all currently accumulated mutations on the current row. - __slots__ = ( - "value", - "row_key", - "family", - "qualifier", - "timestamp_micros", - "labels", - ) - - def __init__( - self, - value: bytes, - row_key: bytes, - family: str, - qualifier: bytes | str, - timestamp_micros: int, - labels: list[str] | None = None, - ): - """ - Cell constructor - - Cell objects are not intended to be constructed by users. - They are returned by the Bigtable backend. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_clear] + :end-before: [END bigtable_api_row_clear] + :dedent: 4 """ - self.value = value - self.row_key = row_key - self.family = family - if isinstance(qualifier, str): - qualifier = qualifier.encode() - self.qualifier = qualifier - self.timestamp_micros = timestamp_micros - self.labels = labels if labels is not None else [] + del self._pb_mutations[:] + + +class ConditionalRow(_SetDeleteRow): + """Google Cloud Bigtable Row for sending mutations conditionally. 
+ + Each mutation has an associated state: :data:`True` or :data:`False`. + When :meth:`commit`-ed, the mutations for the :data:`True` + state will be applied if the filter matches any cells in + the row, otherwise the :data:`False` state will be applied. + + A :class:`ConditionalRow` accumulates mutations in the same way a + :class:`DirectRow` does: - def __int__(self) -> int: + * :meth:`set_cell` + * :meth:`delete` + * :meth:`delete_cell` + * :meth:`delete_cells` + + with the only change being the extra ``state`` parameter:: + + >>> row_cond = table.row(b'row-key2', filter_=row_filter) + >>> row_cond.set_cell(u'fam', b'col', b'cell-val', state=True) + >>> row_cond.delete_cell(u'fam', b'col', state=False) + + .. note:: + + As with :class:`DirectRow`, to actually send these mutations to the + Google Cloud Bigtable API, you must call :meth:`commit`. + + :type row_key: bytes + :param row_key: The key for the current row. + + :type table: :class:`Table <google.cloud.bigtable.table.Table>` + :param table: The table that owns the row. + + :type filter_: :class:`.RowFilter` + :param filter_: Filter to be used for conditional mutations. + """ + + def __init__(self, row_key, table, filter_): + super(ConditionalRow, self).__init__(row_key, table) + self._filter = filter_ + self._true_pb_mutations = [] + self._false_pb_mutations = [] + + def _get_mutations(self, state=None): + """Gets the list of mutations for a given state. + + Overridden so that the state can be used in: + + * :meth:`set_cell` + * :meth:`delete` + * :meth:`delete_cell` + * :meth:`delete_cells` + + :type state: bool + :param state: The state that the mutation should be + applied in. + + :rtype: list + :returns: The list to add new mutations to (for the current state). """ - Allows casting cell to int - Interprets value as a 64-bit big-endian signed integer, as expected by - ReadModifyWrite increment rule + if state: + return self._true_pb_mutations + else: + return self._false_pb_mutations + + def commit(self): + """Makes a ``CheckAndMutateRow`` API request. + + If no mutations have been created in the row, no request is made. + + The mutations will be applied conditionally, based on whether the + filter matches any cells in the :class:`ConditionalRow` or not. (Each + method which adds a mutation has a ``state`` parameter for this + purpose.) + + Mutations are applied atomically and in order, meaning that earlier + mutations can be masked / negated by later ones. Cells already present + in the row are left unchanged unless explicitly changed by a mutation. + + After committing the accumulated mutations, resets the local + mutations. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_commit] + :end-before: [END bigtable_api_row_commit] + :dedent: 4 + + :rtype: bool + :returns: Flag indicating if the filter was matched (which also + indicates which set of mutations were applied by the server). + :raises: :class:`ValueError ` if the number of + mutations exceeds the :data:`MAX_MUTATIONS`. """ - return int.from_bytes(self.value, byteorder="big", signed=True) + true_mutations = self._get_mutations(state=True) + false_mutations = self._get_mutations(state=False) + num_true_mutations = len(true_mutations) + num_false_mutations = len(false_mutations) + if num_true_mutations == 0 and num_false_mutations == 0: + return + if num_true_mutations > MAX_MUTATIONS or num_false_mutations > MAX_MUTATIONS: + raise ValueError( + "Exceeded the maximum allowable mutations (%d). Had %d true " + "mutations and %d false mutations."
+ % (MAX_MUTATIONS, num_true_mutations, num_false_mutations) + ) + + data_client = self._table._instance._client.table_data_client + resp = data_client.check_and_mutate_row( + table_name=self._table.name, + row_key=self._row_key, + predicate_filter=self._filter.to_pb(), + app_profile_id=self._table._app_profile_id, + true_mutations=true_mutations, + false_mutations=false_mutations, + ) + self.clear() + return resp.predicate_matched + + # pylint: disable=arguments-differ + def set_cell(self, column_family_id, column, value, timestamp=None, state=True): + """Sets a value in this row. + + The cell is determined by the ``row_key`` of this + :class:`ConditionalRow` and the ``column``. The ``column`` must be in + an existing :class:`.ColumnFamily` (as determined by + ``column_family_id``). + + .. note:: - def to_dict(self) -> dict[str, Any]: + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_set_cell] + :end-before: [END bigtable_api_row_set_cell] + :dedent: 4 + + :type column_family_id: str + :param column_family_id: The column family that contains the column. + Must be of the form + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type column: bytes + :param column: The column within the column family where the cell + is located. + + :type value: bytes or :class:`int` + :param value: The value to set in the cell. If an integer is used, + will be interpreted as a 64-bit big-endian signed + integer (8 bytes). + + :type timestamp: :class:`datetime.datetime` + :param timestamp: (Optional) The timestamp of the operation. + + :type state: bool + :param state: (Optional) The state that the mutation should be + applied in. Defaults to :data:`True`. """ - Returns a dictionary representation of the cell in the Bigtable Cell - proto format + self._set_cell( + column_family_id, column, value, timestamp=timestamp, state=state + ) + + def delete(self, state=True): + """Deletes this row from the table. + + .. note:: - https://cloud.google.com/bigtable/docs/reference/data/rpc/google.bigtable.v2#cell + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_delete] + :end-before: [END bigtable_api_row_delete] + :dedent: 4 + + :type state: bool + :param state: (Optional) The state that the mutation should be + applied in. Defaults to :data:`True`. """ - cell_dict: dict[str, Any] = { - "value": self.value, - } - cell_dict["timestamp_micros"] = self.timestamp_micros - if self.labels: - cell_dict["labels"] = self.labels - return cell_dict + self._delete(state=state) + + def delete_cell(self, column_family_id, column, time_range=None, state=True): + """Deletes cell in this row. + + .. note:: + + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + For example: + + .. 
literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_delete_cell] + :end-before: [END bigtable_api_row_delete_cell] + :dedent: 4 + + :type column_family_id: str + :param column_family_id: The column family that contains the column + or columns with cells being deleted. Must be + of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type column: bytes + :param column: The column within the column family that will have a + cell deleted. - def __str__(self) -> str: + :type time_range: :class:`TimestampRange` + :param time_range: (Optional) The range of time within which cells + should be deleted. + + :type state: bool + :param state: (Optional) The state that the mutation should be + applied in. Defaults to :data:`True`. """ - Allows casting cell to str - Prints encoded byte string, same as printing value directly. + self._delete_cells( + column_family_id, [column], time_range=time_range, state=state + ) + + def delete_cells(self, column_family_id, columns, time_range=None, state=True): + """Deletes cells in this row. + + .. note:: + + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_delete_cells] + :end-before: [END bigtable_api_row_delete_cells] + :dedent: 4 + + :type column_family_id: str + :param column_family_id: The column family that contains the column + or columns with cells being deleted. Must be + of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type columns: :class:`list` of :class:`str` / + :func:`unicode `, or :class:`object` + :param columns: The columns within the column family that will have + cells deleted. If :attr:`ALL_COLUMNS` is used then the + entire column family will be deleted from the row. + + :type time_range: :class:`TimestampRange` + :param time_range: (Optional) The range of time within which cells + should be deleted. + + :type state: bool + :param state: (Optional) The state that the mutation should be + applied in. Defaults to :data:`True`. """ - return str(self.value) + self._delete_cells( + column_family_id, columns, time_range=time_range, state=state + ) - def __repr__(self): + # pylint: enable=arguments-differ + + def clear(self): + """Removes all currently accumulated mutations on the current row. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_clear] + :end-before: [END bigtable_api_row_clear] + :dedent: 4 """ - Returns a string representation of the cell + del self._true_pb_mutations[:] + del self._false_pb_mutations[:] + + +class AppendRow(Row): + """Google Cloud Bigtable Row for sending append mutations. + + These mutations are intended to augment the value of an existing cell + and use the methods: + + * :meth:`append_cell_value` + * :meth:`increment_cell_value` + + The first works by appending bytes and the second by incrementing an + integer (stored in the cell as 8 bytes). In either case, if the + cell is empty, the default empty value is assumed (empty string for + bytes or 0 for integer). + + :type row_key: bytes + :param row_key: The key for the current row. + + :type table: :class:`Table <google.cloud.bigtable.table.Table>` + :param table: The table that owns the row.
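+
+    A minimal usage sketch, hedged: ``table`` is assumed to already exist,
+    and the family / qualifier names are illustrative only::
+
+        >>> row = AppendRow(b'row-key3', table)
+        >>> row.append_cell_value(u'fam', b'col', b'-suffix')
+        >>> row.increment_cell_value(u'fam', b'counter', 1)
+        >>> row.commit()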
+ """ + + def __init__(self, row_key, table): + super(AppendRow, self).__init__(row_key, table) + self._rule_pb_list = [] + + def clear(self): + """Removes all currently accumulated modifications on current row. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_clear] + :end-before: [END bigtable_api_row_clear] + :dedent: 4 """ - return f"Cell(value={self.value!r}, row_key={self.row_key!r}, family='{self.family}', qualifier={self.qualifier!r}, timestamp_micros={self.timestamp_micros}, labels={self.labels})" + del self._rule_pb_list[:] + + def append_cell_value(self, column_family_id, column, value): + """Appends a value to an existing cell. + + .. note:: - """For Bigtable native ordering""" + This method adds a read-modify rule protobuf to the accumulated + read-modify rules on this row, but does not make an API + request. To actually send an API request (with the rules) to the + Google Cloud Bigtable API, call :meth:`commit`. - def __lt__(self, other) -> bool: + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_append_cell_value] + :end-before: [END bigtable_api_row_append_cell_value] + :dedent: 4 + + :type column_family_id: str + :param column_family_id: The column family that contains the column. + Must be of the form + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type column: bytes + :param column: The column within the column family where the cell + is located. + + :type value: bytes + :param value: The value to append to the existing value in the cell. If + the targeted cell is unset, it will be treated as + containing the empty string. """ - Implements `<` operator + column = _to_bytes(column) + value = _to_bytes(value) + rule_pb = data_v2_pb2.ReadModifyWriteRule( + family_name=column_family_id, column_qualifier=column, append_value=value + ) + self._rule_pb_list.append(rule_pb) + + def increment_cell_value(self, column_family_id, column, int_value): + """Increments a value in an existing cell. + + Assumes the value in the cell is stored as a 64 bit integer + serialized to bytes. + + .. note:: + + This method adds a read-modify rule protobuf to the accumulated + read-modify rules on this row, but does not make an API + request. To actually send an API request (with the rules) to the + Google Cloud Bigtable API, call :meth:`commit`. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_increment_cell_value] + :end-before: [END bigtable_api_row_increment_cell_value] + :dedent: 4 + + :type column_family_id: str + :param column_family_id: The column family that contains the column. + Must be of the form + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type column: bytes + :param column: The column within the column family where the cell + is located. + + :type int_value: int + :param int_value: The value to increment the existing value in the cell + by. If the targeted cell is unset, it will be treated + as containing a zero. Otherwise, the targeted cell + must contain an 8-byte value (interpreted as a 64-bit + big-endian signed integer), or the entire request + will fail. 
""" - if not isinstance(other, Cell): - return NotImplemented - this_ordering = ( - self.family, - self.qualifier, - -self.timestamp_micros, - self.value, - self.labels, + column = _to_bytes(column) + rule_pb = data_v2_pb2.ReadModifyWriteRule( + family_name=column_family_id, + column_qualifier=column, + increment_amount=int_value, ) - other_ordering = ( - other.family, - other.qualifier, - -other.timestamp_micros, - other.value, - other.labels, + self._rule_pb_list.append(rule_pb) + + def commit(self): + """Makes a ``ReadModifyWriteRow`` API request. + + This commits modifications made by :meth:`append_cell_value` and + :meth:`increment_cell_value`. If no modifications were made, makes + no API request and just returns ``{}``. + + Modifies a row atomically, reading the latest existing + timestamp / value from the specified columns and writing a new value by + appending / incrementing. The new cell created uses either the current + server time or the highest timestamp of a cell in that column (if it + exceeds the server time). + + After committing the accumulated mutations, resets the local mutations. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_commit] + :end-before: [END bigtable_api_row_commit] + :dedent: 4 + + :rtype: dict + :returns: The new contents of all modified cells. Returned as a + dictionary of column families, each of which holds a + dictionary of columns. Each column contains a list of cells + modified. Each cell is represented with a two-tuple with the + value (in bytes) and the timestamp for the cell. + :raises: :class:`ValueError ` if the number of + mutations exceeds the :data:`MAX_MUTATIONS`. + """ + num_mutations = len(self._rule_pb_list) + if num_mutations == 0: + return {} + if num_mutations > MAX_MUTATIONS: + raise ValueError( + "%d total append mutations exceed the maximum " + "allowable %d." % (num_mutations, MAX_MUTATIONS) + ) + + data_client = self._table._instance._client.table_data_client + row_response = data_client.read_modify_write_row( + table_name=self._table.name, + row_key=self._row_key, + rules=self._rule_pb_list, + app_profile_id=self._table._app_profile_id, ) - return this_ordering < other_ordering - def __eq__(self, other) -> bool: + # Reset modifications after commit-ing request. + self.clear() + + # NOTE: We expect row_response.key == self._row_key but don't check. + return _parse_rmw_row_response(row_response) + + +def _parse_rmw_row_response(row_response): + """Parses the response to a ``ReadModifyWriteRow`` request. + + :type row_response: :class:`.data_v2_pb2.Row` + :param row_response: The response row (with only modified cells) from a + ``ReadModifyWriteRow`` request. + + :rtype: dict + :returns: The new contents of all modified cells. Returned as a + dictionary of column families, each of which holds a + dictionary of columns. Each column contains a list of cells + modified. Each cell is represented with a two-tuple with the + value (in bytes) and the timestamp for the cell. For example: + + .. 
code:: python + + { + u'col-fam-id': { + b'col-name1': [ + (b'cell-val', datetime.datetime(...)), + (b'cell-val-newer', datetime.datetime(...)), + ], + b'col-name2': [ + (b'altcol-cell-val', datetime.datetime(...)), + ], + }, + u'col-fam-id2': { + b'col-name3-but-other-fam': [ + (b'foo', datetime.datetime(...)), + ], + }, + } + """ + result = {} + for column_family in row_response.row.families: + column_family_id, curr_family = _parse_family_pb(column_family) + result[column_family_id] = curr_family + return result + + +def _parse_family_pb(family_pb): + """Parses a Family protobuf into a dictionary. + + :type family_pb: :class:`._generated.data_pb2.Family` + :param family_pb: A protobuf + + :rtype: tuple + :returns: A string and dictionary. The string is the name of the + column family and the dictionary has column names (within the + family) as keys and cell lists as values. Each cell is + represented with a two-tuple with the value (in bytes) and the + timestamp for the cell. For example: + + .. code:: python + + { + b'col-name1': [ + (b'cell-val', datetime.datetime(...)), + (b'cell-val-newer', datetime.datetime(...)), + ], + b'col-name2': [ + (b'altcol-cell-val', datetime.datetime(...)), + ], + } + """ + result = {} + for column in family_pb.columns: + result[column.qualifier] = cells = [] + for cell in column.cells: + val_pair = (cell.value, _datetime_from_microseconds(cell.timestamp_micros)) + cells.append(val_pair) + + return family_pb.name, result + + +class PartialRowData(object): + """Representation of a partial row in a Google Cloud Bigtable Table. + + These are expected to be updated directly from a + :class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse` + + :type row_key: bytes + :param row_key: The key for the row holding the (partial) data. + """ + + def __init__(self, row_key): + self._row_key = row_key + self._cells = {} + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return other._row_key == self._row_key and other._cells == self._cells + + def __ne__(self, other): + return not self == other + + def to_dict(self): + """Convert the cells to a dictionary. + + This is intended to be used with HappyBase, so the column family and + column qualifiers are combined (with ``:``). + + :rtype: dict + :returns: Dictionary containing all the data in the cells of this row. """ - Implements `==` operator + result = {} + for column_family_id, columns in self._cells.items(): + for column_qual, cells in columns.items(): + key = _to_bytes(column_family_id) + b":" + _to_bytes(column_qual) + result[key] = cells + return result + + @property + def cells(self): + """Property returning all the cells accumulated on this partial row. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_data_cells] + :end-before: [END bigtable_api_row_data_cells] + :dedent: 4 + + :rtype: dict + :returns: Dictionary of the :class:`Cell` objects accumulated. This + dictionary has two levels of keys (first for column families + and second for column names/qualifiers within a family). For + a given column, a list of :class:`Cell` objects is stored.
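+
+        A hedged access sketch (``row_data`` and the family / qualifier
+        names are illustrative)::
+
+            >>> cells = row_data.cells['fam'][b'col']
+            >>> cells[0].value, cells[0].timestamp_micros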
""" - if not isinstance(other, Cell): - return NotImplemented - return ( - self.row_key == other.row_key - and self.family == other.family - and self.qualifier == other.qualifier - and self.value == other.value - and self.timestamp_micros == other.timestamp_micros - and len(self.labels) == len(other.labels) - and all([label in other.labels for label in self.labels]) - ) + return self._cells - def __ne__(self, other) -> bool: + @property + def row_key(self): + """Getter for the current (partial) row's key. + + :rtype: bytes + :returns: The current (partial) row's key. """ - Implements `!=` operator + return self._row_key + + def find_cells(self, column_family_id, column): + """Get a time series of cells stored on this instance. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_find_cells] + :end-before: [END bigtable_api_row_find_cells] + :dedent: 4 + + Args: + column_family_id (str): The ID of the column family. Must be of the + form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + column (bytes): The column within the column family where the cells + are located. + + Returns: + List[~google.cloud.bigtable.row_data.Cell]: The cells stored in the + specified column. + + Raises: + KeyError: If ``column_family_id`` is not among the cells stored + in this row. + KeyError: If ``column`` is not among the cells stored in this row + for the given ``column_family_id``. """ - return not self == other + try: + column_family = self._cells[column_family_id] + except KeyError: + raise KeyError(_MISSING_COLUMN_FAMILY.format(column_family_id)) + + try: + cells = column_family[column] + except KeyError: + raise KeyError(_MISSING_COLUMN.format(column, column_family_id)) + + return cells + + def cell_value(self, column_family_id, column, index=0): + """Get a single cell value stored on this instance. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_cell_value] + :end-before: [END bigtable_api_row_cell_value] + :dedent: 4 + + Args: + column_family_id (str): The ID of the column family. Must be of the + form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + column (bytes): The column within the column family where the cell + is located. + index (Optional[int]): The offset within the series of values. If + not specified, will return the first cell. - def __hash__(self): + Returns: + ~google.cloud.bigtable.row_data.Cell value: The cell value stored + in the specified column and specified index. + + Raises: + KeyError: If ``column_family_id`` is not among the cells stored + in this row. + KeyError: If ``column`` is not among the cells stored in this row + for the given ``column_family_id``. + IndexError: If ``index`` cannot be found within the cells stored + in this row for the given ``column_family_id``, ``column`` + pair. """ - Implements `hash()` function to fingerprint cell + cells = self.find_cells(column_family_id, column) + + try: + cell = cells[index] + except (TypeError, IndexError): + num_cells = len(cells) + msg = _MISSING_INDEX.format(index, column, column_family_id, num_cells) + raise IndexError(msg) + + return cell.value + + def cell_values(self, column_family_id, column, max_count=None): + """Get a time series of cells stored on this instance. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_cell_values] + :end-before: [END bigtable_api_row_cell_values] + :dedent: 4 + + Args: + column_family_id (str): The ID of the column family. 
Must be of the + form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + column (bytes): The column within the column family where the cells + are located. + max_count (int): The maximum number of cells to use. + + Returns: + A generator which provides: cell.value, cell.timestamp_micros + for each cell in the list of cells + + Raises: + KeyError: If ``column_family_id`` is not among the cells stored + in this row. + KeyError: If ``column`` is not among the cells stored in this row + for the given ``column_family_id``. """ - return hash( - ( - self.row_key, - self.family, - self.qualifier, - self.value, - self.timestamp_micros, - tuple(self.labels), - ) + cells = self.find_cells(column_family_id, column) + if max_count is None: + max_count = len(cells) + + for index, cell in enumerate(cells): + if index == max_count: + break + + yield cell.value, cell.timestamp_micros + + +class Cell(object): + """Representation of a Google Cloud Bigtable Cell. + + :type value: bytes + :param value: The value stored in the cell. + + :type timestamp_micros: int + :param timestamp_micros: The timestamp_micros when the cell was stored. + + :type labels: list + :param labels: (Optional) List of strings. Labels applied to the cell. + """ + + def __init__(self, value, timestamp_micros, labels=None): + self.value = value + self.timestamp_micros = timestamp_micros + self.labels = list(labels) if labels is not None else [] + + @classmethod + def from_pb(cls, cell_pb): + """Create a new cell from a Cell protobuf. + + :type cell_pb: :class:`._generated.data_pb2.Cell` + :param cell_pb: The protobuf to convert. + + :rtype: :class:`Cell` + :returns: The cell corresponding to the protobuf. + """ + if cell_pb.labels: + return cls(cell_pb.value, cell_pb.timestamp_micros, labels=cell_pb.labels) + else: + return cls(cell_pb.value, cell_pb.timestamp_micros) + + @property + def timestamp(self): + return _datetime_from_microseconds(self.timestamp_micros) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return ( + other.value == self.value + and other.timestamp_micros == self.timestamp_micros + and other.labels == self.labels + ) + + def __ne__(self, other): + return not self == other + + def __repr__(self): + return "<{name} value={value!r} timestamp={timestamp}>".format( + name=self.__class__.__name__, value=self.value, timestamp=self.timestamp ) + + +class InvalidChunk(RuntimeError): + """Exception raised to invalid chunk data from back-end.""" diff --git a/google/cloud/bigtable/deprecated/row_data.py b/google/cloud/bigtable/row_data.py similarity index 97% rename from google/cloud/bigtable/deprecated/row_data.py rename to google/cloud/bigtable/row_data.py index 9daa1ed8f..e11379108 100644 --- a/google/cloud/bigtable/deprecated/row_data.py +++ b/google/cloud/bigtable/row_data.py @@ -23,10 +23,10 @@ from google.api_core import retry from google.cloud._helpers import _to_bytes # type: ignore -from google.cloud.bigtable.deprecated.row_merger import _RowMerger, _State +from google.cloud.bigtable.row_merger import _RowMerger, _State from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2 from google.cloud.bigtable_v2.types import data as data_v2_pb2 -from google.cloud.bigtable.deprecated.row import Cell, InvalidChunk, PartialRowData +from google.cloud.bigtable.row import Cell, InvalidChunk, PartialRowData # Some classes need to be re-exported here to keep backwards @@ -98,7 +98,7 @@ def _retry_read_rows_exception(exc): """The default retry strategy to be used on retry-able 
errors. Used by -:meth:`~google.cloud.bigtable.deprecated.row_data.PartialRowsData._read_next_response`. +:meth:`~google.cloud.bigtable.row_data.PartialRowsData._read_next_response`. """ @@ -157,7 +157,9 @@ def __init__(self, read_method, request, retry=DEFAULT_RETRY_READ_ROWS): # Otherwise there is a risk of entering an infinite loop that resets # the timeout counter just before it being triggered. The increment # by 1 second here is customary but should not be much less than that. - self.response_iterator = read_method(request, timeout=self.retry._deadline + 1) + self.response_iterator = read_method( + request, timeout=self.retry._deadline + 1, retry=self.retry + ) self.rows = {} diff --git a/google/cloud/bigtable/row_filters.py b/google/cloud/bigtable/row_filters.py index b2fae6971..53192acc8 100644 --- a/google/cloud/bigtable/row_filters.py +++ b/google/cloud/bigtable/row_filters.py @@ -13,25 +13,18 @@ # limitations under the License. """Filters for Google Cloud Bigtable Row classes.""" -from __future__ import annotations import struct -from typing import Any, Sequence, TYPE_CHECKING, overload -from abc import ABC, abstractmethod from google.cloud._helpers import _microseconds_from_datetime # type: ignore from google.cloud._helpers import _to_bytes # type: ignore from google.cloud.bigtable_v2.types import data as data_v2_pb2 -if TYPE_CHECKING: - # import dependencies when type checking - from datetime import datetime - _PACK_I64 = struct.Struct(">q").pack -class RowFilter(ABC): +class RowFilter(object): """Basic filter to apply to cells in a row. These values can be combined via :class:`RowFilterChain`, @@ -42,30 +35,15 @@ class RowFilter(ABC): This class is a do-nothing base class for all row filters. """ - def _to_pb(self) -> data_v2_pb2.RowFilter: - """Converts the row filter to a protobuf. - - Returns: The converted current object. - """ - return data_v2_pb2.RowFilter(**self.to_dict()) - - @abstractmethod - def to_dict(self) -> dict[str, Any]: - """Converts the row filter to a dict representation.""" - pass - - def __repr__(self) -> str: - return f"{self.__class__.__name__}()" - -class _BoolFilter(RowFilter, ABC): +class _BoolFilter(RowFilter): """Row filter that uses a boolean flag. :type flag: bool :param flag: An indicator if a setting is turned on or off. """ - def __init__(self, flag: bool): + def __init__(self, flag): self.flag = flag def __eq__(self, other): @@ -76,9 +54,6 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def __repr__(self) -> str: - return f"{self.__class__.__name__}(flag={self.flag})" - class SinkFilter(_BoolFilter): """Advanced row filter to skip parent filters. @@ -91,9 +66,13 @@ class SinkFilter(_BoolFilter): of a :class:`ConditionalRowFilter`. """ - def to_dict(self) -> dict[str, Any]: - """Converts the row filter to a dict representation.""" - return {"sink": self.flag} + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(sink=self.flag) class PassAllFilter(_BoolFilter): @@ -105,9 +84,13 @@ class PassAllFilter(_BoolFilter): completeness. """ - def to_dict(self) -> dict[str, Any]: - """Converts the row filter to a dict representation.""" - return {"pass_all_filter": self.flag} + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. 
+ """ + return data_v2_pb2.RowFilter(pass_all_filter=self.flag) class BlockAllFilter(_BoolFilter): @@ -118,12 +101,16 @@ class BlockAllFilter(_BoolFilter): temporarily disabling just part of a filter. """ - def to_dict(self) -> dict[str, Any]: - """Converts the row filter to a dict representation.""" - return {"block_all_filter": self.flag} + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(block_all_filter=self.flag) -class _RegexFilter(RowFilter, ABC): +class _RegexFilter(RowFilter): """Row filter that uses a regular expression. The ``regex`` must be valid RE2 patterns. See Google's @@ -137,8 +124,8 @@ class _RegexFilter(RowFilter, ABC): will be encoded as ASCII. """ - def __init__(self, regex: str | bytes): - self.regex: bytes = _to_bytes(regex) + def __init__(self, regex): + self.regex = _to_bytes(regex) def __eq__(self, other): if not isinstance(other, self.__class__): @@ -148,9 +135,6 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def __repr__(self) -> str: - return f"{self.__class__.__name__}(regex={self.regex!r})" - class RowKeyRegexFilter(_RegexFilter): """Row filter for a row key regular expression. @@ -175,9 +159,13 @@ class RowKeyRegexFilter(_RegexFilter): since the row key is already specified. """ - def to_dict(self) -> dict[str, Any]: - """Converts the row filter to a dict representation.""" - return {"row_key_regex_filter": self.regex} + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(row_key_regex_filter=self.regex) class RowSampleFilter(RowFilter): @@ -188,8 +176,8 @@ class RowSampleFilter(RowFilter): interval ``(0, 1)`` The end points are excluded). """ - def __init__(self, sample: float): - self.sample: float = sample + def __init__(self, sample): + self.sample = sample def __eq__(self, other): if not isinstance(other, self.__class__): @@ -199,12 +187,13 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def to_dict(self) -> dict[str, Any]: - """Converts the row filter to a dict representation.""" - return {"row_sample_filter": self.sample} + def to_pb(self): + """Converts the row filter to a protobuf. - def __repr__(self) -> str: - return f"{self.__class__.__name__}(sample={self.sample})" + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(row_sample_filter=self.sample) class FamilyNameRegexFilter(_RegexFilter): @@ -222,9 +211,13 @@ class FamilyNameRegexFilter(_RegexFilter): used as a literal. """ - def to_dict(self) -> dict[str, Any]: - """Converts the row filter to a dict representation.""" - return {"family_name_regex_filter": self.regex} + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(family_name_regex_filter=self.regex) class ColumnQualifierRegexFilter(_RegexFilter): @@ -248,9 +241,13 @@ class ColumnQualifierRegexFilter(_RegexFilter): match this regex (irrespective of column family). """ - def to_dict(self) -> dict[str, Any]: - """Converts the row filter to a dict representation.""" - return {"column_qualifier_regex_filter": self.regex} + def to_pb(self): + """Converts the row filter to a protobuf. 
+ + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(column_qualifier_regex_filter=self.regex) class TimestampRange(object): @@ -265,9 +262,9 @@ class TimestampRange(object): range. If omitted, no upper bound is used. """ - def __init__(self, start: "datetime" | None = None, end: "datetime" | None = None): - self.start: "datetime" | None = start - self.end: "datetime" | None = end + def __init__(self, start=None, end=None): + self.start = start + self.end = end def __eq__(self, other): if not isinstance(other, self.__class__): @@ -277,29 +274,23 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def _to_pb(self) -> data_v2_pb2.TimestampRange: + def to_pb(self): """Converts the :class:`TimestampRange` to a protobuf. - Returns: The converted current object. + :rtype: :class:`.data_v2_pb2.TimestampRange` + :returns: The converted current object. """ - return data_v2_pb2.TimestampRange(**self.to_dict()) - - def to_dict(self) -> dict[str, int]: - """Converts the timestamp range to a dict representation.""" timestamp_range_kwargs = {} if self.start is not None: - start_time = _microseconds_from_datetime(self.start) // 1000 * 1000 - timestamp_range_kwargs["start_timestamp_micros"] = start_time + timestamp_range_kwargs["start_timestamp_micros"] = ( + _microseconds_from_datetime(self.start) // 1000 * 1000 + ) if self.end is not None: end_time = _microseconds_from_datetime(self.end) if end_time % 1000 != 0: - # if not a whole milisecond value, round up end_time = end_time // 1000 * 1000 + 1000 timestamp_range_kwargs["end_timestamp_micros"] = end_time - return timestamp_range_kwargs - - def __repr__(self) -> str: - return f"{self.__class__.__name__}(start={self.start}, end={self.end})" + return data_v2_pb2.TimestampRange(**timestamp_range_kwargs) class TimestampRangeFilter(RowFilter): @@ -309,8 +300,8 @@ class TimestampRangeFilter(RowFilter): :param range_: Range of time that cells should match against. """ - def __init__(self, start: "datetime" | None = None, end: "datetime" | None = None): - self.range_: TimestampRange = TimestampRange(start, end) + def __init__(self, range_): + self.range_ = range_ def __eq__(self, other): if not isinstance(other, self.__class__): @@ -320,22 +311,16 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def _to_pb(self) -> data_v2_pb2.RowFilter: + def to_pb(self): """Converts the row filter to a protobuf. First converts the ``range_`` on the current object to a protobuf and then uses it in the ``timestamp_range_filter`` field. - Returns: The converted current object. + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. """ - return data_v2_pb2.RowFilter(timestamp_range_filter=self.range_._to_pb()) - - def to_dict(self) -> dict[str, Any]: - """Converts the row filter to a dict representation.""" - return {"timestamp_range_filter": self.range_.to_dict()} - - def __repr__(self) -> str: - return f"{self.__class__.__name__}(start={self.range_.start!r}, end={self.range_.end!r})" + return data_v2_pb2.RowFilter(timestamp_range_filter=self.range_.to_pb()) class ColumnRangeFilter(RowFilter): @@ -345,72 +330,71 @@ class ColumnRangeFilter(RowFilter): By default, we include them both, but this can be changed with optional flags. - :type family_id: str - :param family_id: The column family that contains the columns. Must + :type column_family_id: str + :param column_family_id: The column family that contains the columns. 
Must be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - :type start_qualifier: bytes - :param start_qualifier: The start of the range of columns. If no value is + :type start_column: bytes + :param start_column: The start of the range of columns. If no value is used, the backend applies no lower bound to the values. - :type end_qualifier: bytes - :param end_qualifier: The end of the range of columns. If no value is used, + :type end_column: bytes + :param end_column: The end of the range of columns. If no value is used, the backend applies no upper bound to the values. :type inclusive_start: bool :param inclusive_start: Boolean indicating if the start column should be included in the range (or excluded). Defaults - to :data:`True` if ``start_qualifier`` is passed and + to :data:`True` if ``start_column`` is passed and no ``inclusive_start`` was given. :type inclusive_end: bool :param inclusive_end: Boolean indicating if the end column should be included in the range (or excluded). Defaults - to :data:`True` if ``end_qualifier`` is passed and + to :data:`True` if ``end_column`` is passed and no ``inclusive_end`` was given. :raises: :class:`ValueError ` if ``inclusive_start`` - is set but no ``start_qualifier`` is given or if ``inclusive_end`` - is set but no ``end_qualifier`` is given + is set but no ``start_column`` is given or if ``inclusive_end`` + is set but no ``end_column`` is given """ def __init__( self, - family_id: str, - start_qualifier: bytes | None = None, - end_qualifier: bytes | None = None, - inclusive_start: bool | None = None, - inclusive_end: bool | None = None, + column_family_id, + start_column=None, + end_column=None, + inclusive_start=None, + inclusive_end=None, ): + self.column_family_id = column_family_id + if inclusive_start is None: inclusive_start = True - elif start_qualifier is None: + elif start_column is None: raise ValueError( - "inclusive_start was specified but no start_qualifier was given." + "Inclusive start was specified but no " "start column was given." ) + self.start_column = start_column + self.inclusive_start = inclusive_start + if inclusive_end is None: inclusive_end = True - elif end_qualifier is None: + elif end_column is None: raise ValueError( - "inclusive_end was specified but no end_qualifier was given." + "Inclusive end was specified but no " "end column was given." ) - - self.family_id = family_id - - self.start_qualifier = start_qualifier - self.inclusive_start = inclusive_start - - self.end_qualifier = end_qualifier + self.end_column = end_column self.inclusive_end = inclusive_end def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return ( - other.family_id == self.family_id - and other.start_qualifier == self.start_qualifier - and other.end_qualifier == self.end_qualifier + other.column_family_id == self.column_family_id + and other.start_column == self.start_column + and other.end_column == self.end_column and other.inclusive_start == self.inclusive_start and other.inclusive_end == self.inclusive_end ) @@ -418,41 +402,31 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def _to_pb(self) -> data_v2_pb2.RowFilter: + def to_pb(self): """Converts the row filter to a protobuf. First converts to a :class:`.data_v2_pb2.ColumnRange` and then uses it in the ``column_range_filter`` field. - Returns: The converted current object. + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object.
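+
+        A hedged construction sketch (the family and qualifier values are
+        illustrative)::
+
+            >>> ColumnRangeFilter('fam', start_column=b'a', end_column=b'e').to_pb()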
""" - column_range = data_v2_pb2.ColumnRange(**self.range_to_dict()) - return data_v2_pb2.RowFilter(column_range_filter=column_range) - - def range_to_dict(self) -> dict[str, str | bytes]: - """Converts the column range range to a dict representation.""" - column_range_kwargs: dict[str, str | bytes] = {} - column_range_kwargs["family_name"] = self.family_id - if self.start_qualifier is not None: + column_range_kwargs = {"family_name": self.column_family_id} + if self.start_column is not None: if self.inclusive_start: key = "start_qualifier_closed" else: key = "start_qualifier_open" - column_range_kwargs[key] = _to_bytes(self.start_qualifier) - if self.end_qualifier is not None: + column_range_kwargs[key] = _to_bytes(self.start_column) + if self.end_column is not None: if self.inclusive_end: key = "end_qualifier_closed" else: key = "end_qualifier_open" - column_range_kwargs[key] = _to_bytes(self.end_qualifier) - return column_range_kwargs - - def to_dict(self) -> dict[str, Any]: - """Converts the row filter to a dict representation.""" - return {"column_range_filter": self.range_to_dict()} + column_range_kwargs[key] = _to_bytes(self.end_column) - def __repr__(self) -> str: - return f"{self.__class__.__name__}(family_id='{self.family_id}', start_qualifier={self.start_qualifier!r}, end_qualifier={self.end_qualifier!r}, inclusive_start={self.inclusive_start}, inclusive_end={self.inclusive_end})" + column_range = data_v2_pb2.ColumnRange(**column_range_kwargs) + return data_v2_pb2.RowFilter(column_range_filter=column_range) class ValueRegexFilter(_RegexFilter): @@ -476,64 +450,29 @@ class ValueRegexFilter(_RegexFilter): match this regex. String values will be encoded as ASCII. """ - def to_dict(self) -> dict[str, bytes]: - """Converts the row filter to a dict representation.""" - return {"value_regex_filter": self.regex} + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(value_regex_filter=self.regex) -class LiteralValueFilter(ValueRegexFilter): +class ExactValueFilter(ValueRegexFilter): """Row filter for an exact value. :type value: bytes or str or int :param value: - a literal string, integer, or the equivalent bytes. - Integer values will be packed into signed 8-bytes. + a literal string encodable as ASCII, or the + equivalent bytes, or an integer (which will be packed into 8-bytes). """ - def __init__(self, value: bytes | str | int): + def __init__(self, value): if isinstance(value, int): value = _PACK_I64(value) - elif isinstance(value, str): - value = value.encode("utf-8") - value = self._write_literal_regex(value) - super(LiteralValueFilter, self).__init__(value) - - @staticmethod - def _write_literal_regex(input_bytes: bytes) -> bytes: - """ - Escape re2 special characters from literal bytes. - - Extracted from: re2 QuoteMeta: - https://github.com/google/re2/blob/70f66454c255080a54a8da806c52d1f618707f8a/re2/re2.cc#L456 - """ - result = bytearray() - for byte in input_bytes: - # If this is the part of a UTF8 or Latin1 character, we need \ - # to copy this byte without escaping. Experimentally this is \ - # what works correctly with the regexp library. \ - utf8_latin1_check = (byte & 128) == 0 - if ( - (byte < ord("a") or byte > ord("z")) - and (byte < ord("A") or byte > ord("Z")) - and (byte < ord("0") or byte > ord("9")) - and byte != ord("_") - and utf8_latin1_check - ): - if byte == 0: - # Special handling for null chars. 
- # Note that this special handling is not strictly required for RE2, - # but this quoting is required for other regexp libraries such as - # PCRE. - # Can't use "\\0" since the next character might be a digit. - result.extend([ord("\\"), ord("x"), ord("0"), ord("0")]) - continue - result.append(ord(b"\\")) - result.append(byte) - return bytes(result) - - def __repr__(self) -> str: - return f"{self.__class__.__name__}(value={self.regex!r})" + super(ExactValueFilter, self).__init__(value) class ValueRangeFilter(RowFilter): @@ -571,29 +510,25 @@ class ValueRangeFilter(RowFilter): """ def __init__( - self, - start_value: bytes | int | None = None, - end_value: bytes | int | None = None, - inclusive_start: bool | None = None, - inclusive_end: bool | None = None, + self, start_value=None, end_value=None, inclusive_start=None, inclusive_end=None ): if inclusive_start is None: inclusive_start = True elif start_value is None: raise ValueError( - "inclusive_start was specified but no start_value was given." - ) - if inclusive_end is None: - inclusive_end = True - elif end_value is None: - raise ValueError( - "inclusive_end was specified but no end_qualifier was given." + "Inclusive start was specified but no " "start value was given." ) if isinstance(start_value, int): start_value = _PACK_I64(start_value) self.start_value = start_value self.inclusive_start = inclusive_start + if inclusive_end is None: + inclusive_end = True + elif end_value is None: + raise ValueError( + "Inclusive end was specified but no " "end value was given." + ) if isinstance(end_value, int): end_value = _PACK_I64(end_value) self.end_value = end_value @@ -612,19 +547,15 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def _to_pb(self) -> data_v2_pb2.RowFilter: + def to_pb(self): """Converts the row filter to a protobuf. First converts to a :class:`.data_v2_pb2.ValueRange` and then uses it to create a row filter protobuf. - Returns: The converted current object. + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. """ - value_range = data_v2_pb2.ValueRange(**self.range_to_dict()) - return data_v2_pb2.RowFilter(value_range_filter=value_range) - - def range_to_dict(self) -> dict[str, bytes]: - """Converts the value range range to a dict representation.""" value_range_kwargs = {} if self.start_value is not None: if self.inclusive_start: @@ -638,17 +569,12 @@ def range_to_dict(self) -> dict[str, bytes]: else: key = "end_value_open" value_range_kwargs[key] = _to_bytes(self.end_value) - return value_range_kwargs - - def to_dict(self) -> dict[str, Any]: - """Converts the row filter to a dict representation.""" - return {"value_range_filter": self.range_to_dict()} - def __repr__(self) -> str: - return f"{self.__class__.__name__}(start_value={self.start_value!r}, end_value={self.end_value!r}, inclusive_start={self.inclusive_start}, inclusive_end={self.inclusive_end})" + value_range = data_v2_pb2.ValueRange(**value_range_kwargs) + return data_v2_pb2.RowFilter(value_range_filter=value_range) -class _CellCountFilter(RowFilter, ABC): +class _CellCountFilter(RowFilter): """Row filter that uses an integer count of cells. The cell count is used as an offset or a limit for the number @@ -658,7 +584,7 @@ class _CellCountFilter(RowFilter, ABC): :param num_cells: An integer count / offset / limit. 
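+
+    A hedged usage sketch of the concrete subclasses defined below::
+
+        >>> CellsRowOffsetFilter(2)    # skip the first two cells of each row
+        >>> CellsColumnLimitFilter(1)  # keep only the most recent cell per column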
""" - def __init__(self, num_cells: int): + def __init__(self, num_cells): self.num_cells = num_cells def __eq__(self, other): @@ -669,9 +595,6 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def __repr__(self) -> str: - return f"{self.__class__.__name__}(num_cells={self.num_cells})" - class CellsRowOffsetFilter(_CellCountFilter): """Row filter to skip cells in a row. @@ -680,9 +603,13 @@ class CellsRowOffsetFilter(_CellCountFilter): :param num_cells: Skips the first N cells of the row. """ - def to_dict(self) -> dict[str, int]: - """Converts the row filter to a dict representation.""" - return {"cells_per_row_offset_filter": self.num_cells} + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(cells_per_row_offset_filter=self.num_cells) class CellsRowLimitFilter(_CellCountFilter): @@ -692,9 +619,13 @@ class CellsRowLimitFilter(_CellCountFilter): :param num_cells: Matches only the first N cells of the row. """ - def to_dict(self) -> dict[str, int]: - """Converts the row filter to a dict representation.""" - return {"cells_per_row_limit_filter": self.num_cells} + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(cells_per_row_limit_filter=self.num_cells) class CellsColumnLimitFilter(_CellCountFilter): @@ -706,9 +637,13 @@ class CellsColumnLimitFilter(_CellCountFilter): timestamps of each cell. """ - def to_dict(self) -> dict[str, int]: - """Converts the row filter to a dict representation.""" - return {"cells_per_column_limit_filter": self.num_cells} + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(cells_per_column_limit_filter=self.num_cells) class StripValueTransformerFilter(_BoolFilter): @@ -720,9 +655,13 @@ class StripValueTransformerFilter(_BoolFilter): transformer than a generic query / filter. """ - def to_dict(self) -> dict[str, Any]: - """Converts the row filter to a dict representation.""" - return {"strip_value_transformer": self.flag} + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(strip_value_transformer=self.flag) class ApplyLabelFilter(RowFilter): @@ -744,7 +683,7 @@ class ApplyLabelFilter(RowFilter): ``[a-z0-9\\-]+``. """ - def __init__(self, label: str): + def __init__(self, label): self.label = label def __eq__(self, other): @@ -755,15 +694,16 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def to_dict(self) -> dict[str, str]: - """Converts the row filter to a dict representation.""" - return {"apply_label_transformer": self.label} + def to_pb(self): + """Converts the row filter to a protobuf. - def __repr__(self) -> str: - return f"{self.__class__.__name__}(label={self.label})" + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(apply_label_transformer=self.label) -class _FilterCombination(RowFilter, Sequence[RowFilter], ABC): +class _FilterCombination(RowFilter): """Chain of row filters. Sends rows through several filters in sequence. 
The filters are "chained" @@ -774,10 +714,10 @@ class _FilterCombination(RowFilter, Sequence[RowFilter], ABC): :param filters: List of :class:`RowFilter` """ - def __init__(self, filters: list[RowFilter] | None = None): + def __init__(self, filters=None): if filters is None: filters = [] - self.filters: list[RowFilter] = filters + self.filters = filters def __eq__(self, other): if not isinstance(other, self.__class__): @@ -787,38 +727,6 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def __len__(self) -> int: - return len(self.filters) - - @overload - def __getitem__(self, index: int) -> RowFilter: - # overload signature for type checking - pass - - @overload - def __getitem__(self, index: slice) -> list[RowFilter]: - # overload signature for type checking - pass - - def __getitem__(self, index): - return self.filters[index] - - def __repr__(self) -> str: - return f"{self.__class__.__name__}(filters={self.filters})" - - def __str__(self) -> str: - """ - Returns a string representation of the filter chain. - - Adds line breaks between each sub-filter for readability. - """ - output = [f"{self.__class__.__name__}(["] - for filter_ in self.filters: - filter_lines = f"{filter_},".splitlines() - output.extend([f" {line}" for line in filter_lines]) - output.append("])") - return "\n".join(output) - class RowFilterChain(_FilterCombination): """Chain of row filters. @@ -831,20 +739,17 @@ class RowFilterChain(_FilterCombination): :param filters: List of :class:`RowFilter` """ - def _to_pb(self) -> data_v2_pb2.RowFilter: + def to_pb(self): """Converts the row filter to a protobuf. - Returns: The converted current object. + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. """ chain = data_v2_pb2.RowFilter.Chain( - filters=[row_filter._to_pb() for row_filter in self.filters] + filters=[row_filter.to_pb() for row_filter in self.filters] ) return data_v2_pb2.RowFilter(chain=chain) - def to_dict(self) -> dict[str, Any]: - """Converts the row filter to a dict representation.""" - return {"chain": {"filters": [f.to_dict() for f in self.filters]}} - class RowFilterUnion(_FilterCombination): """Union of row filters. @@ -859,58 +764,50 @@ class RowFilterUnion(_FilterCombination): :param filters: List of :class:`RowFilter` """ - def _to_pb(self) -> data_v2_pb2.RowFilter: + def to_pb(self): """Converts the row filter to a protobuf. - Returns: The converted current object. + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. """ interleave = data_v2_pb2.RowFilter.Interleave( - filters=[row_filter._to_pb() for row_filter in self.filters] + filters=[row_filter.to_pb() for row_filter in self.filters] ) return data_v2_pb2.RowFilter(interleave=interleave) - def to_dict(self) -> dict[str, Any]: - """Converts the row filter to a dict representation.""" - return {"interleave": {"filters": [f.to_dict() for f in self.filters]}} - class ConditionalRowFilter(RowFilter): """Conditional row filter which exhibits ternary behavior. - Executes one of two filters based on another filter. If the ``predicate_filter`` + Executes one of two filters based on another filter. If the ``base_filter`` returns any cells in the row, then ``true_filter`` is executed. If not, then ``false_filter`` is executed. .. note:: - The ``predicate_filter`` does not execute atomically with the true and false + The ``base_filter`` does not execute atomically with the true and false filters, which may lead to inconsistent or unexpected results. 
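A brief aside on composing the combination filters restored above — ``RowFilterChain`` applies its sub-filters in sequence, while ``RowFilterUnion`` interleaves their outputs — again assuming the ``google.cloud.bigtable.row_filters`` import path::

    from google.cloud.bigtable.row_filters import (
        CellsColumnLimitFilter,
        RowFilterChain,
        RowFilterUnion,
        ValueRangeFilter,
    )

    # Chain: a cell must pass every sub-filter, applied in order.
    latest_in_range = RowFilterChain(
        filters=[
            CellsColumnLimitFilter(1),                            # newest cell per column
            ValueRangeFilter(start_value=b"a", end_value=b"z"),   # bounds are inclusive by default
        ]
    )
    # Union: cells passing either branch are emitted; the result may contain duplicates.
    combined = RowFilterUnion(filters=[latest_in_range, CellsColumnLimitFilter(2)])
    filter_pb = combined.to_pb()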
Additionally, executing a :class:`ConditionalRowFilter` has poor performance on the server, especially when ``false_filter`` is set. - :type predicate_filter: :class:`RowFilter` - :param predicate_filter: The filter to condition on before executing the + :type base_filter: :class:`RowFilter` + :param base_filter: The filter to condition on before executing the true/false filters. :type true_filter: :class:`RowFilter` :param true_filter: (Optional) The filter to execute if there are any cells - matching ``predicate_filter``. If not provided, no results + matching ``base_filter``. If not provided, no results will be returned in the true case. :type false_filter: :class:`RowFilter` :param false_filter: (Optional) The filter to execute if there are no cells - matching ``predicate_filter``. If not provided, no results + matching ``base_filter``. If not provided, no results will be returned in the false case. """ - def __init__( - self, - predicate_filter: RowFilter, - true_filter: RowFilter | None = None, - false_filter: RowFilter | None = None, - ): - self.predicate_filter = predicate_filter + def __init__(self, base_filter, true_filter=None, false_filter=None): + self.base_filter = base_filter self.true_filter = true_filter self.false_filter = false_filter @@ -918,7 +815,7 @@ def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return ( - other.predicate_filter == self.predicate_filter + other.base_filter == self.base_filter and other.true_filter == self.true_filter and other.false_filter == self.false_filter ) @@ -926,43 +823,16 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def _to_pb(self) -> data_v2_pb2.RowFilter: + def to_pb(self): """Converts the row filter to a protobuf. - Returns: The converted current object. + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. 
""" - condition_kwargs = {"predicate_filter": self.predicate_filter._to_pb()} + condition_kwargs = {"predicate_filter": self.base_filter.to_pb()} if self.true_filter is not None: - condition_kwargs["true_filter"] = self.true_filter._to_pb() + condition_kwargs["true_filter"] = self.true_filter.to_pb() if self.false_filter is not None: - condition_kwargs["false_filter"] = self.false_filter._to_pb() + condition_kwargs["false_filter"] = self.false_filter.to_pb() condition = data_v2_pb2.RowFilter.Condition(**condition_kwargs) return data_v2_pb2.RowFilter(condition=condition) - - def condition_to_dict(self) -> dict[str, Any]: - """Converts the condition to a dict representation.""" - condition_kwargs = {"predicate_filter": self.predicate_filter.to_dict()} - if self.true_filter is not None: - condition_kwargs["true_filter"] = self.true_filter.to_dict() - if self.false_filter is not None: - condition_kwargs["false_filter"] = self.false_filter.to_dict() - return condition_kwargs - - def to_dict(self) -> dict[str, Any]: - """Converts the row filter to a dict representation.""" - return {"condition": self.condition_to_dict()} - - def __repr__(self) -> str: - return f"{self.__class__.__name__}(predicate_filter={self.predicate_filter!r}, true_filter={self.true_filter!r}, false_filter={self.false_filter!r})" - - def __str__(self) -> str: - output = [f"{self.__class__.__name__}("] - for filter_type in ("predicate_filter", "true_filter", "false_filter"): - filter_ = getattr(self, filter_type) - if filter_ is None: - continue - # add the new filter set, adding indentations for readability - filter_lines = f"{filter_type}={filter_},".splitlines() - output.extend(f" {line}" for line in filter_lines) - output.append(")") - return "\n".join(output) diff --git a/google/cloud/bigtable/deprecated/row_merger.py b/google/cloud/bigtable/row_merger.py similarity index 99% rename from google/cloud/bigtable/deprecated/row_merger.py rename to google/cloud/bigtable/row_merger.py index d29d64eb2..515b91df7 100644 --- a/google/cloud/bigtable/deprecated/row_merger.py +++ b/google/cloud/bigtable/row_merger.py @@ -1,6 +1,6 @@ from enum import Enum from collections import OrderedDict -from google.cloud.bigtable.deprecated.row import Cell, PartialRowData, InvalidChunk +from google.cloud.bigtable.row import Cell, PartialRowData, InvalidChunk _MISSING_COLUMN_FAMILY = "Column family {} is not among the cells stored in this row." 
_MISSING_COLUMN = ( diff --git a/google/cloud/bigtable/deprecated/row_set.py b/google/cloud/bigtable/row_set.py similarity index 100% rename from google/cloud/bigtable/deprecated/row_set.py rename to google/cloud/bigtable/row_set.py diff --git a/google/cloud/bigtable/deprecated/table.py b/google/cloud/bigtable/table.py similarity index 95% rename from google/cloud/bigtable/deprecated/table.py rename to google/cloud/bigtable/table.py index cf60b066e..e3191a729 100644 --- a/google/cloud/bigtable/deprecated/table.py +++ b/google/cloud/bigtable/table.py @@ -28,24 +28,24 @@ from google.api_core.retry import if_exception_type from google.api_core.retry import Retry from google.cloud._helpers import _to_bytes # type: ignore -from google.cloud.bigtable.deprecated.backup import Backup -from google.cloud.bigtable.deprecated.column_family import _gc_rule_from_pb -from google.cloud.bigtable.deprecated.column_family import ColumnFamily -from google.cloud.bigtable.deprecated.batcher import MutationsBatcher -from google.cloud.bigtable.deprecated.batcher import FLUSH_COUNT, MAX_ROW_BYTES -from google.cloud.bigtable.deprecated.encryption_info import EncryptionInfo -from google.cloud.bigtable.deprecated.policy import Policy -from google.cloud.bigtable.deprecated.row import AppendRow -from google.cloud.bigtable.deprecated.row import ConditionalRow -from google.cloud.bigtable.deprecated.row import DirectRow -from google.cloud.bigtable.deprecated.row_data import ( +from google.cloud.bigtable.backup import Backup +from google.cloud.bigtable.column_family import _gc_rule_from_pb +from google.cloud.bigtable.column_family import ColumnFamily +from google.cloud.bigtable.batcher import MutationsBatcher +from google.cloud.bigtable.batcher import FLUSH_COUNT, MAX_MUTATION_SIZE +from google.cloud.bigtable.encryption_info import EncryptionInfo +from google.cloud.bigtable.policy import Policy +from google.cloud.bigtable.row import AppendRow +from google.cloud.bigtable.row import ConditionalRow +from google.cloud.bigtable.row import DirectRow +from google.cloud.bigtable.row_data import ( PartialRowsData, _retriable_internal_server_error, ) -from google.cloud.bigtable.deprecated.row_data import DEFAULT_RETRY_READ_ROWS -from google.cloud.bigtable.deprecated.row_set import RowSet -from google.cloud.bigtable.deprecated.row_set import RowRange -from google.cloud.bigtable.deprecated import enums +from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS +from google.cloud.bigtable.row_set import RowSet +from google.cloud.bigtable.row_set import RowRange +from google.cloud.bigtable import enums from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2 from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient from google.cloud.bigtable_admin_v2.types import table as admin_messages_v2_pb2 @@ -88,7 +88,7 @@ class _BigtableRetryableError(Exception): ) """The default retry strategy to be used on retry-able errors. -Used by :meth:`~google.cloud.bigtable.deprecated.table.Table.mutate_rows`. +Used by :meth:`~google.cloud.bigtable.table.Table.mutate_rows`. """ @@ -119,7 +119,7 @@ class Table(object): :type table_id: str :param table_id: The ID of the table. - :type instance: :class:`~google.cloud.bigtable.deprecated.instance.Instance` + :type instance: :class:`~google.cloud.bigtable.instance.Instance` :param instance: The instance that owns the table. 
:type app_profile_id: str @@ -172,7 +172,7 @@ def get_iam_policy(self): :end-before: [END bigtable_api_table_get_iam_policy] :dedent: 4 - :rtype: :class:`google.cloud.bigtable.deprecated.policy.Policy` + :rtype: :class:`google.cloud.bigtable.policy.Policy` :returns: The current IAM policy of this table. """ table_client = self._instance._client.table_admin_client @@ -184,7 +184,7 @@ def set_iam_policy(self, policy): existing policy. For more information about policy, please see documentation of - class `google.cloud.bigtable.deprecated.policy.Policy` + class `google.cloud.bigtable.policy.Policy` For example: @@ -193,11 +193,11 @@ class `google.cloud.bigtable.deprecated.policy.Policy` :end-before: [END bigtable_api_table_set_iam_policy] :dedent: 4 - :type policy: :class:`google.cloud.bigtable.deprecated.policy.Policy` + :type policy: :class:`google.cloud.bigtable.policy.Policy` :param policy: A new IAM policy to replace the current IAM policy of this table. - :rtype: :class:`google.cloud.bigtable.deprecated.policy.Policy` + :rtype: :class:`google.cloud.bigtable.policy.Policy` :returns: The current IAM policy of this table. """ table_client = self._instance._client.table_admin_client @@ -271,7 +271,7 @@ def row(self, row_key, filter_=None, append=False): .. warning:: At most one of ``filter_`` and ``append`` can be used in a - :class:`~google.cloud.bigtable.deprecated.row.Row`. + :class:`~google.cloud.bigtable.row.Row`. :type row_key: bytes :param row_key: The key for the row being created. @@ -284,7 +284,7 @@ def row(self, row_key, filter_=None, append=False): :param append: (Optional) Flag to determine if the row should be used for append mutations. - :rtype: :class:`~google.cloud.bigtable.deprecated.row.Row` + :rtype: :class:`~google.cloud.bigtable.row.Row` :returns: A row owned by this table. :raises: :class:`ValueError ` if both ``filter_`` and ``append`` are used. @@ -307,7 +307,7 @@ def row(self, row_key, filter_=None, append=False): return DirectRow(row_key, self) def append_row(self, row_key): - """Create a :class:`~google.cloud.bigtable.deprecated.row.AppendRow` associated with this table. + """Create a :class:`~google.cloud.bigtable.row.AppendRow` associated with this table. For example: @@ -325,7 +325,7 @@ def append_row(self, row_key): return AppendRow(row_key, self) def direct_row(self, row_key): - """Create a :class:`~google.cloud.bigtable.deprecated.row.DirectRow` associated with this table. + """Create a :class:`~google.cloud.bigtable.row.DirectRow` associated with this table. For example: @@ -343,7 +343,7 @@ def direct_row(self, row_key): return DirectRow(row_key, self) def conditional_row(self, row_key, filter_): - """Create a :class:`~google.cloud.bigtable.deprecated.row.ConditionalRow` associated with this table. + """Create a :class:`~google.cloud.bigtable.row.ConditionalRow` associated with this table. For example: @@ -515,7 +515,7 @@ def get_encryption_info(self): :rtype: dict :returns: Dictionary of encryption info for this table. Keys are cluster ids and - values are tuples of :class:`google.cloud.bigtable.deprecated.encryption.EncryptionInfo` instances. + values are tuples of :class:`google.cloud.bigtable.encryption.EncryptionInfo` instances. 
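The ``Table`` row factories documented above pair with the usual ``google.cloud.bigtable.Client`` entry point; a minimal sketch, with hypothetical project, instance, and table names::

    from google.cloud import bigtable
    from google.cloud.bigtable.row_filters import ExactValueFilter

    client = bigtable.Client(project="my-project")
    table = client.instance("my-instance").table("my-table")

    # DirectRow: unconditional mutations, sent with commit().
    row = table.direct_row(b"row-key-1")
    row.set_cell("cf1", b"qualifier", b"value")
    row.commit()

    # ConditionalRow gates mutations on a filter; AppendRow does read-modify-write.
    cond_row = table.conditional_row(b"row-key-2", filter_=ExactValueFilter(b"active"))
    append_row = table.append_row(b"row-key-3")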
""" ENCRYPTION_VIEW = enums.Table.View.ENCRYPTION_VIEW table_client = self._instance._client.table_admin_client @@ -844,7 +844,9 @@ def drop_by_prefix(self, row_key_prefix, timeout=None): request={"name": self.name, "row_key_prefix": _to_bytes(row_key_prefix)} ) - def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): + def mutations_batcher( + self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_MUTATION_SIZE + ): """Factory to create a mutation batcher associated with this instance. For example: @@ -967,7 +969,7 @@ def list_backups(self, cluster_id=None, filter_=None, order_by=None, page_size=0 number of resources in a page. :rtype: :class:`~google.api_core.page_iterator.Iterator` - :returns: Iterator of :class:`~google.cloud.bigtable.deprecated.backup.Backup` + :returns: Iterator of :class:`~google.cloud.bigtable.backup.Backup` resources within the current Instance. :raises: :class:`ValueError ` if one of the returned Backups' name is not of the expected format. @@ -1367,8 +1369,8 @@ def _check_row_table_name(table_name, row): :type table_name: str :param table_name: The name of the table. - :type row: :class:`~google.cloud.bigtable.deprecated.row.Row` - :param row: An instance of :class:`~google.cloud.bigtable.deprecated.row.Row` + :type row: :class:`~google.cloud.bigtable.row.Row` + :param row: An instance of :class:`~google.cloud.bigtable.row.Row` subclasses. :raises: :exc:`~.table.TableMismatchError` if the row does not belong to @@ -1384,8 +1386,8 @@ def _check_row_table_name(table_name, row): def _check_row_type(row): """Checks that a row is an instance of :class:`.DirectRow`. - :type row: :class:`~google.cloud.bigtable.deprecated.row.Row` - :param row: An instance of :class:`~google.cloud.bigtable.deprecated.row.Row` + :type row: :class:`~google.cloud.bigtable.row.Row` + :param row: An instance of :class:`~google.cloud.bigtable.row.Row` subclasses. :raises: :class:`TypeError ` if the row is not an diff --git a/google/cloud/bigtable_admin/__init__.py b/google/cloud/bigtable_admin/__init__.py index 6ddc6acb2..0ba93ec63 100644 --- a/google/cloud/bigtable_admin/__init__.py +++ b/google/cloud/bigtable_admin/__init__.py @@ -200,6 +200,7 @@ from google.cloud.bigtable_admin_v2.types.instance import Instance from google.cloud.bigtable_admin_v2.types.table import Backup from google.cloud.bigtable_admin_v2.types.table import BackupInfo +from google.cloud.bigtable_admin_v2.types.table import ChangeStreamConfig from google.cloud.bigtable_admin_v2.types.table import ColumnFamily from google.cloud.bigtable_admin_v2.types.table import EncryptionInfo from google.cloud.bigtable_admin_v2.types.table import GcRule @@ -282,6 +283,7 @@ "Instance", "Backup", "BackupInfo", + "ChangeStreamConfig", "ColumnFamily", "EncryptionInfo", "GcRule", diff --git a/google/cloud/bigtable_admin/gapic_version.py b/google/cloud/bigtable_admin/gapic_version.py index 8d4f4cfb6..0f1a446f3 100644 --- a/google/cloud/bigtable_admin/gapic_version.py +++ b/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.17.0" # {x-release-please-version} +__version__ = "2.19.0" # {x-release-please-version} diff --git a/google/cloud/bigtable_admin_v2/__init__.py b/google/cloud/bigtable_admin_v2/__init__.py index 282834fe7..c030ec1bd 100644 --- a/google/cloud/bigtable_admin_v2/__init__.py +++ b/google/cloud/bigtable_admin_v2/__init__.py @@ -92,6 +92,7 @@ from .types.instance import Instance from .types.table import Backup from .types.table import BackupInfo +from .types.table import ChangeStreamConfig from .types.table import ColumnFamily from .types.table import EncryptionInfo from .types.table import GcRule @@ -110,6 +111,7 @@ "BackupInfo", "BigtableInstanceAdminClient", "BigtableTableAdminClient", + "ChangeStreamConfig", "CheckConsistencyRequest", "CheckConsistencyResponse", "Cluster", diff --git a/google/cloud/bigtable_admin_v2/gapic_version.py b/google/cloud/bigtable_admin_v2/gapic_version.py index 8d4f4cfb6..0f1a446f3 100644 --- a/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.17.0" # {x-release-please-version} +__version__ = "2.19.0" # {x-release-please-version} diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index ddeaf979a..12811bcea 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -1137,8 +1137,8 @@ async def update_cluster( Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]]): - The request object. A resizable group of nodes in a - particular cloud location, capable of serving all + The request object. A resizable group of nodes in a particular cloud + location, capable of serving all [Tables][google.bigtable.admin.v2.Table] in the parent [Instance][google.bigtable.admin.v2.Instance]. retry (google.api_core.retry.Retry): Designation of what errors, if any, @@ -1880,8 +1880,7 @@ async def get_iam_policy( Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]): - The request object. Request message for `GetIamPolicy` - method. + The request object. Request message for ``GetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the policy is being requested. See the @@ -2030,8 +2029,7 @@ async def set_iam_policy( Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]): - The request object. Request message for `SetIamPolicy` - method. + The request object. Request message for ``SetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the policy is being specified. See the @@ -2171,8 +2169,7 @@ async def test_iam_permissions( Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]): - The request object. Request message for - `TestIamPermissions` method. + The request object. Request message for ``TestIamPermissions`` method. resource (:class:`str`): REQUIRED: The resource for which the policy detail is being requested. 
See diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index fcb767a3d..ecc9bf1e2 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -1400,8 +1400,8 @@ def update_cluster( Args: request (Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]): - The request object. A resizable group of nodes in a - particular cloud location, capable of serving all + The request object. A resizable group of nodes in a particular cloud + location, capable of serving all [Tables][google.bigtable.admin.v2.Table] in the parent [Instance][google.bigtable.admin.v2.Instance]. retry (google.api_core.retry.Retry): Designation of what errors, if any, @@ -2104,8 +2104,7 @@ def get_iam_policy( Args: request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): - The request object. Request message for `GetIamPolicy` - method. + The request object. Request message for ``GetIamPolicy`` method. resource (str): REQUIRED: The resource for which the policy is being requested. See the @@ -2241,8 +2240,7 @@ def set_iam_policy( Args: request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): - The request object. Request message for `SetIamPolicy` - method. + The request object. Request message for ``SetIamPolicy`` method. resource (str): REQUIRED: The resource for which the policy is being specified. See the @@ -2379,8 +2377,7 @@ def test_iam_permissions( Args: request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): - The request object. Request message for - `TestIamPermissions` method. + The request object. Request message for ``TestIamPermissions`` method. resource (str): REQUIRED: The resource for which the policy detail is being requested. See diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py index 5ae9600a9..e9b94cf78 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -874,7 +874,6 @@ def __call__( request (~.bigtable_instance_admin.CreateAppProfileRequest): The request object. Request message for BigtableInstanceAdmin.CreateAppProfile. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -978,7 +977,6 @@ def __call__( request (~.bigtable_instance_admin.CreateClusterRequest): The request object. Request message for BigtableInstanceAdmin.CreateCluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1076,7 +1074,6 @@ def __call__( request (~.bigtable_instance_admin.CreateInstanceRequest): The request object. Request message for BigtableInstanceAdmin.CreateInstance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1176,7 +1173,6 @@ def __call__( request (~.bigtable_instance_admin.DeleteAppProfileRequest): The request object. Request message for BigtableInstanceAdmin.DeleteAppProfile. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. @@ -1254,7 +1250,6 @@ def __call__( request (~.bigtable_instance_admin.DeleteClusterRequest): The request object. Request message for BigtableInstanceAdmin.DeleteCluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1330,7 +1325,6 @@ def __call__( request (~.bigtable_instance_admin.DeleteInstanceRequest): The request object. Request message for BigtableInstanceAdmin.DeleteInstance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1406,7 +1400,6 @@ def __call__( request (~.bigtable_instance_admin.GetAppProfileRequest): The request object. Request message for BigtableInstanceAdmin.GetAppProfile. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1497,7 +1490,6 @@ def __call__( request (~.bigtable_instance_admin.GetClusterRequest): The request object. Request message for BigtableInstanceAdmin.GetCluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1759,7 +1751,6 @@ def __call__( request (~.bigtable_instance_admin.GetInstanceRequest): The request object. Request message for BigtableInstanceAdmin.GetInstance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1853,7 +1844,6 @@ def __call__( request (~.bigtable_instance_admin.ListAppProfilesRequest): The request object. Request message for BigtableInstanceAdmin.ListAppProfiles. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1945,7 +1935,6 @@ def __call__( request (~.bigtable_instance_admin.ListClustersRequest): The request object. Request message for BigtableInstanceAdmin.ListClusters. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2035,7 +2024,6 @@ def __call__( request (~.bigtable_instance_admin.ListHotTabletsRequest): The request object. Request message for BigtableInstanceAdmin.ListHotTablets. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2127,7 +2115,6 @@ def __call__( request (~.bigtable_instance_admin.ListInstancesRequest): The request object. Request message for BigtableInstanceAdmin.ListInstances. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2219,7 +2206,6 @@ def __call__( request (~.bigtable_instance_admin.PartialUpdateClusterRequest): The request object. Request message for BigtableInstanceAdmin.PartialUpdateCluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2321,7 +2307,6 @@ def __call__( request (~.bigtable_instance_admin.PartialUpdateInstanceRequest): The request object. Request message for BigtableInstanceAdmin.PartialUpdateInstance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
@@ -2692,7 +2677,6 @@ def __call__( request (~.bigtable_instance_admin.UpdateAppProfileRequest): The request object. Request message for BigtableInstanceAdmin.UpdateAppProfile. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2784,7 +2768,6 @@ def __call__( location, capable of serving all [Tables][google.bigtable.admin.v2.Table] in the parent [Instance][google.bigtable.admin.v2.Instance]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2885,7 +2868,6 @@ def __call__( served from all [Clusters][google.bigtable.admin.v2.Cluster] in the instance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index bc85e5c5d..1663c16eb 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -369,6 +369,7 @@ async def create_table_from_snapshot( request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -682,16 +683,19 @@ async def update_table( should not be set. update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): Required. The list of fields to update. A mask - specifying which fields (e.g. ``deletion_protection``) + specifying which fields (e.g. ``change_stream_config``) in the ``table`` field should be updated. This mask is relative to the ``table`` field, not to the request message. The wildcard (*) path is currently not supported. Currently UpdateTable is only supported for - the following field: + the following fields: + + - ``change_stream_config`` + - ``change_stream_config.retention_period`` + - ``deletion_protection`` - - ``deletion_protection`` If ``column_families`` is set - in ``update_mask``, it will return an UNIMPLEMENTED - error. + If ``column_families`` is set in ``update_mask``, it + will return an UNIMPLEMENTED error. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1300,6 +1304,7 @@ async def snapshot_table( request (Optional[Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1437,6 +1442,7 @@ async def get_snapshot( request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]]): The request object. 
Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1549,6 +1555,7 @@ async def list_snapshots( request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1672,6 +1679,7 @@ async def delete_snapshot( request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -2290,8 +2298,7 @@ async def get_iam_policy( Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]): - The request object. Request message for `GetIamPolicy` - method. + The request object. Request message for ``GetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the policy is being requested. See the @@ -2440,8 +2447,7 @@ async def set_iam_policy( Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]): - The request object. Request message for `SetIamPolicy` - method. + The request object. Request message for ``SetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the policy is being specified. See the @@ -2581,8 +2587,7 @@ async def test_iam_permissions( Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]): - The request object. Request message for - `TestIamPermissions` method. + The request object. Request message for ``TestIamPermissions`` method. resource (:class:`str`): REQUIRED: The resource for which the policy detail is being requested. See diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index aa7eaa197..e043aa224 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -696,6 +696,7 @@ def create_table_from_snapshot( request (Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -991,16 +992,19 @@ def update_table( should not be set. update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The list of fields to update. A mask - specifying which fields (e.g. ``deletion_protection``) + specifying which fields (e.g. ``change_stream_config``) in the ``table`` field should be updated. 
This mask is relative to the ``table`` field, not to the request message. The wildcard (*) path is currently not supported. Currently UpdateTable is only supported for - the following field: + the following fields: + + - ``change_stream_config`` + - ``change_stream_config.retention_period`` + - ``deletion_protection`` - - ``deletion_protection`` If ``column_families`` is set - in ``update_mask``, it will return an UNIMPLEMENTED - error. + If ``column_families`` is set in ``update_mask``, it + will return an UNIMPLEMENTED error. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1594,6 +1598,7 @@ def snapshot_table( request (Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1731,6 +1736,7 @@ def get_snapshot( request (Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1833,6 +1839,7 @@ def list_snapshots( request (Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1946,6 +1953,7 @@ def delete_snapshot( request (Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -2545,8 +2553,7 @@ def get_iam_policy( Args: request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): - The request object. Request message for `GetIamPolicy` - method. + The request object. Request message for ``GetIamPolicy`` method. resource (str): REQUIRED: The resource for which the policy is being requested. See the @@ -2682,8 +2689,7 @@ def set_iam_policy( Args: request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): - The request object. Request message for `SetIamPolicy` - method. + The request object. Request message for ``SetIamPolicy`` method. resource (str): REQUIRED: The resource for which the policy is being specified. See the @@ -2820,8 +2826,7 @@ def test_iam_permissions( Args: request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): - The request object. Request message for - `TestIamPermissions` method. + The request object. Request message for ``TestIamPermissions`` method. resource (str): REQUIRED: The resource for which the policy detail is being requested. 
See diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index 5c25ac556..4d5b2ed1c 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -938,7 +938,6 @@ def __call__( request (~.bigtable_table_admin.CheckConsistencyRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1041,7 +1040,6 @@ def __call__( request (~.bigtable_table_admin.CreateBackupRequest): The request object. The request for [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1139,7 +1137,6 @@ def __call__( request (~.bigtable_table_admin.CreateTableRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1248,7 +1245,6 @@ def __call__( changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1348,7 +1344,6 @@ def __call__( request (~.bigtable_table_admin.DeleteBackupRequest): The request object. The request for [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1431,7 +1426,6 @@ def __call__( changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1507,7 +1501,6 @@ def __call__( request (~.bigtable_table_admin.DeleteTableRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1583,7 +1576,6 @@ def __call__( request (~.bigtable_table_admin.DropRowRangeRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1669,7 +1661,6 @@ def __call__( request (~.bigtable_table_admin.GenerateConsistencyTokenRequest): The request object. 
Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1772,7 +1763,6 @@ def __call__( request (~.bigtable_table_admin.GetBackupRequest): The request object. The request for [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2042,7 +2032,6 @@ def __call__( changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2142,7 +2131,6 @@ def __call__( request (~.bigtable_table_admin.GetTableRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2234,7 +2222,6 @@ def __call__( request (~.bigtable_table_admin.ListBackupsRequest): The request object. The request for [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2331,7 +2318,6 @@ def __call__( changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2428,7 +2414,6 @@ def __call__( request (~.bigtable_table_admin.ListTablesRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2518,7 +2503,6 @@ def __call__( request (~.bigtable_table_admin.ModifyColumnFamiliesRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2621,7 +2605,6 @@ def __call__( request (~.bigtable_table_admin.RestoreTableRequest): The request object. The request for [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2901,7 +2884,6 @@ def __call__( changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -3101,7 +3083,6 @@ def __call__( request (~.bigtable_table_admin.UndeleteTableRequest): The request object. 
Request message for [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -3201,7 +3182,6 @@ def __call__( request (~.bigtable_table_admin.UpdateBackupRequest): The request object. The request for [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -3300,7 +3280,6 @@ def __call__( request (~.bigtable_table_admin.UpdateTableRequest): The request object. The request for [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. diff --git a/google/cloud/bigtable_admin_v2/types/__init__.py b/google/cloud/bigtable_admin_v2/types/__init__.py index 5a66ddf09..69153c9fc 100644 --- a/google/cloud/bigtable_admin_v2/types/__init__.py +++ b/google/cloud/bigtable_admin_v2/types/__init__.py @@ -91,6 +91,7 @@ from .table import ( Backup, BackupInfo, + ChangeStreamConfig, ColumnFamily, EncryptionInfo, GcRule, @@ -170,6 +171,7 @@ "Instance", "Backup", "BackupInfo", + "ChangeStreamConfig", "ColumnFamily", "EncryptionInfo", "GcRule", diff --git a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index 9b236fea9..4c4b9e9e2 100644 --- a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -460,14 +460,18 @@ class UpdateTableRequest(proto.Message): used to identify the table to update. update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The list of fields to update. A mask specifying - which fields (e.g. ``deletion_protection``) in the ``table`` - field should be updated. This mask is relative to the - ``table`` field, not to the request message. The wildcard - (*) path is currently not supported. Currently UpdateTable - is only supported for the following field: - - - ``deletion_protection`` If ``column_families`` is set in - ``update_mask``, it will return an UNIMPLEMENTED error. + which fields (e.g. ``change_stream_config``) in the + ``table`` field should be updated. This mask is relative to + the ``table`` field, not to the request message. The + wildcard (*) path is currently not supported. Currently + UpdateTable is only supported for the following fields: + + - ``change_stream_config`` + - ``change_stream_config.retention_period`` + - ``deletion_protection`` + + If ``column_families`` is set in ``update_mask``, it will + return an UNIMPLEMENTED error. """ table: gba_table.Table = proto.Field( diff --git a/google/cloud/bigtable_admin_v2/types/table.py b/google/cloud/bigtable_admin_v2/types/table.py index fd936df63..16d136e16 100644 --- a/google/cloud/bigtable_admin_v2/types/table.py +++ b/google/cloud/bigtable_admin_v2/types/table.py @@ -29,6 +29,7 @@ manifest={ "RestoreSourceType", "RestoreInfo", + "ChangeStreamConfig", "Table", "ColumnFamily", "GcRule", @@ -82,6 +83,27 @@ class RestoreInfo(proto.Message): ) +class ChangeStreamConfig(proto.Message): + r"""Change stream configuration. + + Attributes: + retention_period (google.protobuf.duration_pb2.Duration): + How long the change stream should be + retained. 
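The new ``ChangeStreamConfig`` message (its ``retention_period`` semantics continue just below) plugs into ``UpdateTable`` through the expanded ``update_mask`` support shown earlier; a hedged sketch using the flattened admin-client arguments, with hypothetical resource names::

    from google.cloud import bigtable_admin_v2
    from google.protobuf import duration_pb2, field_mask_pb2

    admin = bigtable_admin_v2.BigtableTableAdminClient()
    table = bigtable_admin_v2.Table(
        name="projects/my-project/instances/my-instance/tables/my-table",
        change_stream_config=bigtable_admin_v2.ChangeStreamConfig(
            # Retention must be between 1 and 7 days; 3 days here.
            retention_period=duration_pb2.Duration(seconds=3 * 24 * 60 * 60),
        ),
    )
    operation = admin.update_table(
        table=table,
        update_mask=field_mask_pb2.FieldMask(
            paths=["change_stream_config.retention_period"]
        ),
    )
    updated_table = operation.result()  # long-running operation resolves to the Table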
Change stream data older than the + retention period will not be returned when + reading the change stream from the table. + Values must be at least 1 day and at most 7 + days, and will be truncated to microsecond + granularity. + """ + + retention_period: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + + class Table(proto.Message): r"""A collection of user data indexed by row, column, and timestamp. Each table is served using the resources of its @@ -114,6 +136,10 @@ class Table(proto.Message): another data source (e.g. a backup), this field will be populated with information about the restore. + change_stream_config (google.cloud.bigtable_admin_v2.types.ChangeStreamConfig): + If specified, enable the change stream on + this table. Otherwise, the change stream is + disabled and the change stream is not retained. deletion_protection (bool): Set to true to make the table protected against data loss. i.e. deleting the following @@ -263,6 +289,11 @@ class ReplicationState(proto.Enum): number=6, message="RestoreInfo", ) + change_stream_config: "ChangeStreamConfig" = proto.Field( + proto.MESSAGE, + number=8, + message="ChangeStreamConfig", + ) deletion_protection: bool = proto.Field( proto.BOOL, number=9, diff --git a/google/cloud/bigtable_v2/__init__.py b/google/cloud/bigtable_v2/__init__.py index 342718dea..ee3bd8c0c 100644 --- a/google/cloud/bigtable_v2/__init__.py +++ b/google/cloud/bigtable_v2/__init__.py @@ -31,6 +31,7 @@ from .types.bigtable import MutateRowsResponse from .types.bigtable import PingAndWarmRequest from .types.bigtable import PingAndWarmResponse +from .types.bigtable import RateLimitInfo from .types.bigtable import ReadChangeStreamRequest from .types.bigtable import ReadChangeStreamResponse from .types.bigtable import ReadModifyWriteRowRequest @@ -54,6 +55,7 @@ from .types.data import StreamPartition from .types.data import TimestampRange from .types.data import ValueRange +from .types.feature_flags import FeatureFlags from .types.request_stats import FullReadStatsView from .types.request_stats import ReadIterationStats from .types.request_stats import RequestLatencyStats @@ -69,6 +71,7 @@ "Column", "ColumnRange", "Family", + "FeatureFlags", "FullReadStatsView", "GenerateInitialChangeStreamPartitionsRequest", "GenerateInitialChangeStreamPartitionsResponse", @@ -79,6 +82,7 @@ "Mutation", "PingAndWarmRequest", "PingAndWarmResponse", + "RateLimitInfo", "ReadChangeStreamRequest", "ReadChangeStreamResponse", "ReadIterationStats", diff --git a/google/cloud/bigtable_v2/gapic_version.py b/google/cloud/bigtable_v2/gapic_version.py index 8d4f4cfb6..0f1a446f3 100644 --- a/google/cloud/bigtable_v2/gapic_version.py +++ b/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.17.0" # {x-release-please-version} +__version__ = "2.19.0" # {x-release-please-version} diff --git a/google/cloud/bigtable_v2/services/bigtable/async_client.py b/google/cloud/bigtable_v2/services/bigtable/async_client.py index 3465569b3..abd82d4d8 100644 --- a/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -242,8 +242,10 @@ def read_rows( on the ``request`` instance; if ``request`` is provided, this should not be set. app_profile_id (:class:`str`): - This value specifies routing for replication. This API - only accepts the empty value of app_profile_id. 
+ This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this @@ -807,8 +809,8 @@ async def ping_and_warm( Args: request (Optional[Union[google.cloud.bigtable_v2.types.PingAndWarmRequest, dict]]): - The request object. Request message for client - connection keep-alive and warming. + The request object. Request message for client connection + keep-alive and warming. name (:class:`str`): Required. The unique name of the instance to check permissions for as well as respond. Values are of the @@ -1027,8 +1029,9 @@ def generate_initial_change_stream_partitions( Args: request (Optional[Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]]): - The request object. NOTE: This API is intended to be - used by Apache Beam BigtableIO. Request message for + The request object. NOTE: This API is intended to be used + by Apache Beam BigtableIO. Request + message for Bigtable.GenerateInitialChangeStreamPartitions. table_name (:class:`str`): Required. The unique name of the table from which to get @@ -1126,9 +1129,9 @@ def read_change_stream( Args: request (Optional[Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]]): - The request object. NOTE: This API is intended to be - used by Apache Beam BigtableIO. Request message for - Bigtable.ReadChangeStream. + The request object. NOTE: This API is intended to be used + by Apache Beam BigtableIO. Request + message for Bigtable.ReadChangeStream. table_name (:class:`str`): Required. The unique name of the table from which to read a change stream. Values are of the form diff --git a/google/cloud/bigtable_v2/services/bigtable/client.py b/google/cloud/bigtable_v2/services/bigtable/client.py index 60622509a..b0efc8a0b 100644 --- a/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/google/cloud/bigtable_v2/services/bigtable/client.py @@ -493,8 +493,10 @@ def read_rows( on the ``request`` instance; if ``request`` is provided, this should not be set. app_profile_id (str): - This value specifies routing for replication. This API - only accepts the empty value of app_profile_id. + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this @@ -1093,8 +1095,8 @@ def ping_and_warm( Args: request (Union[google.cloud.bigtable_v2.types.PingAndWarmRequest, dict]): - The request object. Request message for client - connection keep-alive and warming. + The request object. Request message for client connection + keep-alive and warming. name (str): Required. The unique name of the instance to check permissions for as well as respond. Values are of the @@ -1329,8 +1331,9 @@ def generate_initial_change_stream_partitions( Args: request (Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]): - The request object. NOTE: This API is intended to be - used by Apache Beam BigtableIO. Request message for + The request object. NOTE: This API is intended to be used + by Apache Beam BigtableIO. Request + message for Bigtable.GenerateInitialChangeStreamPartitions. table_name (str): Required. 
The unique name of the table from which to get @@ -1432,9 +1435,9 @@ def read_change_stream( Args: request (Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]): - The request object. NOTE: This API is intended to be - used by Apache Beam BigtableIO. Request message for - Bigtable.ReadChangeStream. + The request object. NOTE: This API is intended to be used + by Apache Beam BigtableIO. Request + message for Bigtable.ReadChangeStream. table_name (str): Required. The unique name of the table from which to read a change stream. Values are of the form diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py index ee9cb046f..4343fbb90 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -471,7 +471,6 @@ def __call__( request (~.bigtable.CheckAndMutateRowRequest): The request object. Request message for Bigtable.CheckAndMutateRow. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -575,7 +574,6 @@ def __call__( by Apache Beam BigtableIO. Request message for Bigtable.GenerateInitialChangeStreamPartitions. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -684,7 +682,6 @@ def __call__( request (~.bigtable.MutateRowRequest): The request object. Request message for Bigtable.MutateRow. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -783,7 +780,6 @@ def __call__( request (~.bigtable.MutateRowsRequest): The request object. Request message for BigtableService.MutateRows. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -881,7 +877,6 @@ def __call__( request (~.bigtable.PingAndWarmRequest): The request object. Request message for client connection keep-alive and warming. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -982,7 +977,6 @@ def __call__( The request object. NOTE: This API is intended to be used by Apache Beam BigtableIO. Request message for Bigtable.ReadChangeStream. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1083,7 +1077,6 @@ def __call__( request (~.bigtable.ReadModifyWriteRowRequest): The request object. Request message for Bigtable.ReadModifyWriteRow. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1184,7 +1177,6 @@ def __call__( request (~.bigtable.ReadRowsRequest): The request object. Request message for Bigtable.ReadRows. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1280,7 +1272,6 @@ def __call__( request (~.bigtable.SampleRowKeysRequest): The request object. Request message for Bigtable.SampleRowKeys. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
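[Editor's note] As the updated read_rows docstrings above state, app_profile_id no longer has to be empty: when it is omitted, the request is routed through the instance's "default" application profile. A minimal sketch of that documented behavior against the raw bigtable_v2 GAPIC client; the project, instance, table, and profile names are hypothetical:

from google.cloud.bigtable_v2 import BigtableClient, ReadRowsRequest

client = BigtableClient()
# Hypothetical resource name, purely for illustration.
table_name = "projects/my-project/instances/my-instance/tables/my-table"

# Omitting app_profile_id routes the request through the "default" app profile;
# setting it routes the request through the named profile instead.
request = ReadRowsRequest(table_name=table_name, app_profile_id="my-app-profile")
for response in client.read_rows(request=request):
    for chunk in response.chunks:
        print(chunk.row_key, chunk.value)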
diff --git a/google/cloud/bigtable_v2/types/__init__.py b/google/cloud/bigtable_v2/types/__init__.py
index bb2533e33..9f15efaf5 100644
--- a/google/cloud/bigtable_v2/types/__init__.py
+++ b/google/cloud/bigtable_v2/types/__init__.py
@@ -24,6 +24,7 @@
     MutateRowsResponse,
     PingAndWarmRequest,
     PingAndWarmResponse,
+    RateLimitInfo,
     ReadChangeStreamRequest,
     ReadChangeStreamResponse,
     ReadModifyWriteRowRequest,
@@ -50,6 +51,9 @@
     TimestampRange,
     ValueRange,
 )
+from .feature_flags import (
+    FeatureFlags,
+)
 from .request_stats import (
     FullReadStatsView,
     ReadIterationStats,
@@ -71,6 +75,7 @@
     "MutateRowsResponse",
     "PingAndWarmRequest",
     "PingAndWarmResponse",
+    "RateLimitInfo",
     "ReadChangeStreamRequest",
     "ReadChangeStreamResponse",
     "ReadModifyWriteRowRequest",
@@ -94,6 +99,7 @@
     "StreamPartition",
     "TimestampRange",
     "ValueRange",
+    "FeatureFlags",
     "FullReadStatsView",
     "ReadIterationStats",
     "RequestLatencyStats",
diff --git a/google/cloud/bigtable_v2/types/bigtable.py b/google/cloud/bigtable_v2/types/bigtable.py
index ea97588c2..13f6ac0db 100644
--- a/google/cloud/bigtable_v2/types/bigtable.py
+++ b/google/cloud/bigtable_v2/types/bigtable.py
@@ -38,6 +38,7 @@
     "MutateRowResponse",
     "MutateRowsRequest",
     "MutateRowsResponse",
+    "RateLimitInfo",
     "CheckAndMutateRowRequest",
     "CheckAndMutateRowResponse",
     "PingAndWarmRequest",
@@ -61,8 +62,9 @@ class ReadRowsRequest(proto.Message):
             Values are of the form
             ``projects//instances//tables/``.
         app_profile_id (str):
-            This value specifies routing for replication. This API only
-            accepts the empty value of app_profile_id.
+            This value specifies routing for replication.
+            If not specified, the "default" application
+            profile will be used.
         rows (google.cloud.bigtable_v2.types.RowSet):
             The row keys and/or ranges to read
             sequentially. If not specified, reads from all
@@ -469,10 +471,19 @@ class Entry(proto.Message):
 class MutateRowsResponse(proto.Message):
     r"""Response message for BigtableService.MutateRows.
 
+    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
     Attributes:
         entries (MutableSequence[google.cloud.bigtable_v2.types.MutateRowsResponse.Entry]):
             One or more results for Entries from the
             batch request.
+        rate_limit_info (google.cloud.bigtable_v2.types.RateLimitInfo):
+            Information about how the client should limit the
+            rate (QPS). Primarily used by supported official
+            Cloud Bigtable clients. If unset, the rate limit
+            info is not provided by the server.
+
+            This field is a member of `oneof`_ ``_rate_limit_info``.
     """
 
     class Entry(proto.Message):
@@ -506,6 +517,50 @@ class Entry(proto.Message):
         number=1,
         message=Entry,
     )
+    rate_limit_info: "RateLimitInfo" = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        optional=True,
+        message="RateLimitInfo",
+    )
+
+
+class RateLimitInfo(proto.Message):
+    r"""Information about how the client should adjust the load to
+    Bigtable.
+
+    Attributes:
+        period (google.protobuf.duration_pb2.Duration):
+            Time that clients should wait before
+            adjusting the target rate again. If clients
+            adjust the rate too frequently, the impact of the
+            previous adjustment may not have been taken into
+            account and may over-throttle or under-throttle.
+            If clients adjust the rate too slowly, they will not
+            be responsive to load changes on the server side,
+            and may over-throttle or under-throttle.
+        factor (float):
+            If it has been at least one ``period`` since the last load
+            adjustment, the client should multiply the current load by
+            this value to get the new target load. For example, if the
+            current load is 100 and ``factor`` is 0.8, the new target
+            load should be 80. After adjusting, the client should ignore
+            ``factor`` until another ``period`` has passed.
+
+            The client can measure its load using any unit that's
+            comparable over time. For example, QPS can be used as long as
+            each request involves a similar amount of work.
+    """
+
+    period: duration_pb2.Duration = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=duration_pb2.Duration,
+    )
+    factor: float = proto.Field(
+        proto.DOUBLE,
+        number=2,
+    )
 
 
 class CheckAndMutateRowRequest(proto.Message):
diff --git a/google/cloud/bigtable_v2/types/feature_flags.py b/google/cloud/bigtable_v2/types/feature_flags.py
new file mode 100644
index 000000000..1b5f76e24
--- /dev/null
+++ b/google/cloud/bigtable_v2/types/feature_flags.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+from typing import MutableMapping, MutableSequence
+
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.bigtable.v2",
+    manifest={
+        "FeatureFlags",
+    },
+)
+
+
+class FeatureFlags(proto.Message):
+    r"""Feature flags supported by a client. This is intended to be sent as
+    part of request metadata to assure the server that certain behaviors
+    are safe to enable. This proto is meant to be serialized and
+    websafe-base64 encoded under the ``bigtable-features`` metadata key.
+    The value will remain constant for the lifetime of a client and, due
+    to HTTP/2's HPACK compression, the request overhead will be tiny.
+    This is an internal implementation detail and should not be used by
+    end users directly.
+
+    Attributes:
+        mutate_rows_rate_limit (bool):
+            Notify the server that the client enables
+            batch write flow control by requesting
+            RateLimitInfo from MutateRowsResponse.
+ """ + + mutate_rows_rate_limit: bool = proto.Field( + proto.BOOL, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/noxfile.py b/noxfile.py index 164d138bd..8499a610f 100644 --- a/noxfile.py +++ b/noxfile.py @@ -40,7 +40,7 @@ "pytest-asyncio", ] UNIT_TEST_EXTERNAL_DEPENDENCIES = [ - "git+https://github.com/googleapis/python-api-core.git@retry_generators" + # "git+https://github.com/googleapis/python-api-core.git@retry_generators" ] UNIT_TEST_LOCAL_DEPENDENCIES = [] UNIT_TEST_DEPENDENCIES = [] @@ -55,7 +55,7 @@ "google-cloud-testutils", ] SYSTEM_TEST_EXTERNAL_DEPENDENCIES = [ - "git+https://github.com/googleapis/python-api-core.git@retry_generators" + # "git+https://github.com/googleapis/python-api-core.git@retry_generators" ] SYSTEM_TEST_LOCAL_DEPENDENCIES = [] UNIT_TEST_DEPENDENCIES = [] @@ -138,13 +138,11 @@ def mypy(session): session.install("google-cloud-testutils") session.run( "mypy", - "google/cloud/bigtable", + "google/cloud/bigtable/data", "--check-untyped-defs", "--warn-unreachable", "--disallow-any-generics", "--exclude", - "google/cloud/bigtable/deprecated", - "--exclude", "tests/system/v2_client", "--exclude", "tests/unit/v2_client", @@ -318,7 +316,6 @@ def system(session): "py.test", "--quiet", f"--junitxml=system_{session.python}_sponge_log.xml", - "--ignore=tests/system/v2_client", system_test_folder_path, *session.posargs, ) @@ -466,11 +463,6 @@ def prerelease_deps(session): ) session.run("python", "-c", "import grpc; print(grpc.__version__)") - # TODO: remove adter merging api-core - session.install( - "--upgrade", "--no-deps", "--force-reinstall", *UNIT_TEST_EXTERNAL_DEPENDENCIES - ) - session.run("py.test", "tests/unit") system_test_path = os.path.join("tests", "system.py") diff --git a/python-api-core b/python-api-core index 9ba76760f..a526d6593 160000 --- a/python-api-core +++ b/python-api-core @@ -1 +1 @@ -Subproject commit 9ba76760f5b7ba8128be85ca780811a0b9ec9087 +Subproject commit a526d659320939cd7f47ee775b250e8a3e3ab16b diff --git a/samples/beam/requirements-test.txt b/samples/beam/requirements-test.txt index c021c5b5b..c4d04a08d 100644 --- a/samples/beam/requirements-test.txt +++ b/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/samples/beam/requirements.txt b/samples/beam/requirements.txt index bcb270e72..8be9b98e0 100644 --- a/samples/beam/requirements.txt +++ b/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.45.0 +apache-beam==2.46.0 google-cloud-bigtable==2.17.0 google-cloud-core==2.3.2 diff --git a/samples/hello/README.md b/samples/hello/README.md index 0e1fc92f9..b3779fb43 100644 --- a/samples/hello/README.md +++ b/samples/hello/README.md @@ -17,7 +17,7 @@ Demonstrates how to connect to Cloud Bigtable and run some basic operations. Mor To run this sample: -1. If this is your first time working with GCP products, you will need to set up [the Cloud SDK][cloud_sdk] or utilize [Google Cloud Shell][gcloud_shell]. This sample may [require authetication][authentication] and you will need to [enable billing][enable_billing]. +1. If this is your first time working with GCP products, you will need to set up [the Cloud SDK][cloud_sdk] or utilize [Google Cloud Shell][gcloud_shell]. This sample may [require authentication][authentication] and you will need to [enable billing][enable_billing]. 1. Make a fork of this repo and clone the branch locally, then navigate to the sample directory you want to use. 
diff --git a/samples/hello/main.py b/samples/hello/main.py index 7b2b1764a..5e47b4a38 100644 --- a/samples/hello/main.py +++ b/samples/hello/main.py @@ -87,26 +87,30 @@ def main(project_id, instance_id, table_id): # [START bigtable_hw_create_filter] # Create a filter to only retrieve the most recent version of the cell - # for each column accross entire row. + # for each column across entire row. row_filter = row_filters.CellsColumnLimitFilter(1) # [END bigtable_hw_create_filter] # [START bigtable_hw_get_with_filter] + # [START bigtable_hw_get_by_key] print("Getting a single greeting by row key.") key = "greeting0".encode() row = table.read_row(key, row_filter) cell = row.cells[column_family_id][column][0] print(cell.value.decode("utf-8")) + # [END bigtable_hw_get_by_key] # [END bigtable_hw_get_with_filter] # [START bigtable_hw_scan_with_filter] + # [START bigtable_hw_scan_all] print("Scanning for all greetings:") partial_rows = table.read_rows(filter_=row_filter) for row in partial_rows: cell = row.cells[column_family_id][column][0] print(cell.value.decode("utf-8")) + # [END bigtable_hw_scan_all] # [END bigtable_hw_scan_with_filter] # [START bigtable_hw_delete_table] diff --git a/samples/hello/requirements-test.txt b/samples/hello/requirements-test.txt index c021c5b5b..c4d04a08d 100644 --- a/samples/hello/requirements-test.txt +++ b/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/samples/hello_happybase/requirements-test.txt b/samples/hello_happybase/requirements-test.txt index c021c5b5b..c4d04a08d 100644 --- a/samples/hello_happybase/requirements-test.txt +++ b/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/samples/instanceadmin/requirements-test.txt b/samples/instanceadmin/requirements-test.txt index c021c5b5b..c4d04a08d 100644 --- a/samples/instanceadmin/requirements-test.txt +++ b/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/samples/metricscaler/requirements-test.txt b/samples/metricscaler/requirements-test.txt index 82f315c7f..761227068 100644 --- a/samples/metricscaler/requirements-test.txt +++ b/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==7.2.2 -mock==5.0.1 +pytest==7.3.1 +mock==5.0.2 google-cloud-testutils diff --git a/samples/metricscaler/requirements.txt b/samples/metricscaler/requirements.txt index e9647809f..02e08b4c8 100644 --- a/samples/metricscaler/requirements.txt +++ b/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.17.0 -google-cloud-monitoring==2.14.1 +google-cloud-monitoring==2.14.2 diff --git a/samples/quickstart/requirements-test.txt b/samples/quickstart/requirements-test.txt index c021c5b5b..c4d04a08d 100644 --- a/samples/quickstart/requirements-test.txt +++ b/samples/quickstart/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/samples/quickstart_happybase/requirements-test.txt b/samples/quickstart_happybase/requirements-test.txt index c021c5b5b..c4d04a08d 100644 --- a/samples/quickstart_happybase/requirements-test.txt +++ b/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/samples/snippets/deletes/deletes_snippets.py b/samples/snippets/deletes/deletes_snippets.py index 4e89189db..8e78083bf 100644 --- a/samples/snippets/deletes/deletes_snippets.py +++ b/samples/snippets/deletes/deletes_snippets.py @@ -38,7 +38,7 @@ def delete_from_column_family(project_id, instance_id, table_id): 
table = instance.table(table_id) row = table.row("phone#4c410523#20190501") row.delete_cells( - column_family_id="cell_plan", columns=["data_plan_01gb", "data_plan_05gb"] + column_family_id="cell_plan", columns=row.ALL_COLUMNS ) row.commit() diff --git a/samples/snippets/deletes/requirements-test.txt b/samples/snippets/deletes/requirements-test.txt index c021c5b5b..c4d04a08d 100644 --- a/samples/snippets/deletes/requirements-test.txt +++ b/samples/snippets/deletes/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/samples/snippets/filters/requirements-test.txt b/samples/snippets/filters/requirements-test.txt index c021c5b5b..c4d04a08d 100644 --- a/samples/snippets/filters/requirements-test.txt +++ b/samples/snippets/filters/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/samples/snippets/reads/requirements-test.txt b/samples/snippets/reads/requirements-test.txt index c021c5b5b..c4d04a08d 100644 --- a/samples/snippets/reads/requirements-test.txt +++ b/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/samples/snippets/writes/requirements-test.txt b/samples/snippets/writes/requirements-test.txt index 8d6117f16..96aa71dab 100644 --- a/samples/snippets/writes/requirements-test.txt +++ b/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==2.2.1 -pytest==7.2.2 +pytest==7.3.1 diff --git a/samples/tableadmin/requirements-test.txt b/samples/tableadmin/requirements-test.txt index d3ddc990f..ca1f33bd3 100644 --- a/samples/tableadmin/requirements-test.txt +++ b/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.2.2 +pytest==7.3.1 google-cloud-testutils==1.3.3 diff --git a/setup.py b/setup.py index 49bb10adc..e05b37c79 100644 --- a/setup.py +++ b/setup.py @@ -37,7 +37,7 @@ # 'Development Status :: 5 - Production/Stable' release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - "google-api-core[grpc] >= 1.34.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*", + "google-api-core[grpc] == 2.12.0.dev0", # TODO: change to >= after streaming retries is merged "google-cloud-core >= 1.4.1, <3.0.0dev", "grpc-google-iam-v1 >= 0.12.4, <1.0.0dev", "proto-plus >= 1.22.0, <2.0.0dev", diff --git a/testing/constraints-3.7.txt b/testing/constraints-3.7.txt index 7bf769c9b..92b616563 100644 --- a/testing/constraints-3.7.txt +++ b/testing/constraints-3.7.txt @@ -5,9 +5,8 @@ # # e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", # Then this file should have foo==1.14.0 -# TODO: reset after merging api-core submodule -# google-api-core==2.11.0 -google-cloud-core==1.4.1 +google-api-core==2.12.0.dev0 +google-cloud-core==2.3.2 grpc-google-iam-v1==0.12.4 proto-plus==1.22.0 libcst==0.2.5 diff --git a/google/cloud/bigtable/deprecated/__init__.py b/tests/system/data/__init__.py similarity index 64% rename from google/cloud/bigtable/deprecated/__init__.py rename to tests/system/data/__init__.py index a54fffdf1..89a37dc92 100644 --- a/google/cloud/bigtable/deprecated/__init__.py +++ b/tests/system/data/__init__.py @@ -1,4 +1,5 @@ -# Copyright 2015 Google LLC +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,15 +12,4 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. - -"""Google Cloud Bigtable API package.""" - -from google.cloud.bigtable.deprecated.client import Client - -from google.cloud.bigtable import gapic_version as package_version - -__version__: str - -__version__ = package_version.__version__ - -__all__ = ["__version__", "Client"] +# diff --git a/tests/system/test_system.py b/tests/system/data/test_system.py similarity index 93% rename from tests/system/test_system.py rename to tests/system/data/test_system.py index e1771202a..548433444 100644 --- a/tests/system/test_system.py +++ b/tests/system/data/test_system.py @@ -20,7 +20,7 @@ from google.api_core import retry from google.api_core.exceptions import ClientError -from google.cloud.bigtable.read_modify_write_rules import MAX_INCREMENT_VALUE +from google.cloud.bigtable.data.read_modify_write_rules import MAX_INCREMENT_VALUE TEST_FAMILY = "test-family" TEST_FAMILY_2 = "test-family-2" @@ -135,10 +135,10 @@ def table_id(table_admin_client, project_id, instance_id): @pytest_asyncio.fixture(scope="session") async def client(): - from google.cloud.bigtable import BigtableDataClient + from google.cloud.bigtable.data import BigtableDataClientAsync project = os.getenv("GOOGLE_CLOUD_PROJECT") or None - async with BigtableDataClient(project=project) as client: + async with BigtableDataClientAsync(project=project) as client: yield client @@ -201,7 +201,7 @@ async def _retrieve_cell_value(table, row_key): """ Helper to read an individual row """ - from google.cloud.bigtable import ReadRowsQuery + from google.cloud.bigtable.data import ReadRowsQuery row_list = await table.read_rows(ReadRowsQuery(row_keys=row_key)) assert len(row_list) == 1 @@ -216,7 +216,7 @@ async def _create_row_and_mutation( """ Helper to create a new row, and a sample set_cell mutation to change its value """ - from google.cloud.bigtable.mutations import SetCell + from google.cloud.bigtable.data.mutations import SetCell row_key = uuid.uuid4().hex.encode() family = TEST_FAMILY @@ -303,7 +303,7 @@ async def test_bulk_mutations_set_cell(client, table, temp_rows): """ Ensure cells can be set properly """ - from google.cloud.bigtable.mutations import RowMutationEntry + from google.cloud.bigtable.data.mutations import RowMutationEntry new_value = uuid.uuid4().hex.encode() row_key, mutation = await _create_row_and_mutation( @@ -323,7 +323,7 @@ async def test_mutations_batcher_context_manager(client, table, temp_rows): """ test batcher with context manager. 
Should flush on exit """ - from google.cloud.bigtable.mutations import RowMutationEntry + from google.cloud.bigtable.data.mutations import RowMutationEntry new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] row_key, mutation = await _create_row_and_mutation( @@ -349,7 +349,7 @@ async def test_mutations_batcher_timer_flush(client, table, temp_rows): """ batch should occur after flush_interval seconds """ - from google.cloud.bigtable.mutations import RowMutationEntry + from google.cloud.bigtable.data.mutations import RowMutationEntry new_value = uuid.uuid4().hex.encode() row_key, mutation = await _create_row_and_mutation( @@ -373,7 +373,7 @@ async def test_mutations_batcher_count_flush(client, table, temp_rows): """ batch should flush after flush_limit_mutation_count mutations """ - from google.cloud.bigtable.mutations import RowMutationEntry + from google.cloud.bigtable.data.mutations import RowMutationEntry new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] row_key, mutation = await _create_row_and_mutation( @@ -407,7 +407,7 @@ async def test_mutations_batcher_bytes_flush(client, table, temp_rows): """ batch should flush after flush_limit_bytes bytes """ - from google.cloud.bigtable.mutations import RowMutationEntry + from google.cloud.bigtable.data.mutations import RowMutationEntry new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] row_key, mutation = await _create_row_and_mutation( @@ -442,7 +442,7 @@ async def test_mutations_batcher_no_flush(client, table, temp_rows): """ test with no flush requirements met """ - from google.cloud.bigtable.mutations import RowMutationEntry + from google.cloud.bigtable.data.mutations import RowMutationEntry new_value = uuid.uuid4().hex.encode() start_value = b"unchanged" @@ -494,7 +494,7 @@ async def test_read_modify_write_row_increment( """ test read_modify_write_row """ - from google.cloud.bigtable.read_modify_write_rules import IncrementRule + from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule row_key = b"test-row-key" family = TEST_FAMILY @@ -531,7 +531,7 @@ async def test_read_modify_write_row_append( """ test read_modify_write_row """ - from google.cloud.bigtable.read_modify_write_rules import AppendValueRule + from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule row_key = b"test-row-key" family = TEST_FAMILY @@ -554,8 +554,8 @@ async def test_read_modify_write_row_chained(client, table, temp_rows): """ test read_modify_write_row with multiple rules """ - from google.cloud.bigtable.read_modify_write_rules import AppendValueRule - from google.cloud.bigtable.read_modify_write_rules import IncrementRule + from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule + from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule row_key = b"test-row-key" family = TEST_FAMILY @@ -599,8 +599,8 @@ async def test_check_and_mutate( """ test that check_and_mutate_row works applies the right mutations, and returns the right result """ - from google.cloud.bigtable.mutations import SetCell - from google.cloud.bigtable.row_filters import ValueRangeFilter + from google.cloud.bigtable.data.mutations import SetCell + from google.cloud.bigtable.data.row_filters import ValueRangeFilter row_key = b"test-row-key" family = TEST_FAMILY @@ -671,7 +671,7 @@ async def test_read_rows_sharded_simple(table, temp_rows): """ Test read rows sharded with two queries """ - from google.cloud.bigtable.read_rows_query import ReadRowsQuery + from 
google.cloud.bigtable.data.read_rows_query import ReadRowsQuery await temp_rows.add_row(b"a") await temp_rows.add_row(b"b") @@ -693,8 +693,8 @@ async def test_read_rows_sharded_from_sample(table, temp_rows): """ Test end-to-end sharding """ - from google.cloud.bigtable.read_rows_query import ReadRowsQuery - from google.cloud.bigtable.read_rows_query import RowRange + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + from google.cloud.bigtable.data.read_rows_query import RowRange await temp_rows.add_row(b"a") await temp_rows.add_row(b"b") @@ -717,8 +717,8 @@ async def test_read_rows_sharded_filters_limits(table, temp_rows): """ Test read rows sharded with filters and limits """ - from google.cloud.bigtable.read_rows_query import ReadRowsQuery - from google.cloud.bigtable.row_filters import ApplyLabelFilter + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter await temp_rows.add_row(b"a") await temp_rows.add_row(b"b") @@ -745,8 +745,8 @@ async def test_read_rows_range_query(table, temp_rows): """ Ensure that the read_rows method works """ - from google.cloud.bigtable import ReadRowsQuery - from google.cloud.bigtable import RowRange + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable.data import RowRange await temp_rows.add_row(b"a") await temp_rows.add_row(b"b") @@ -766,7 +766,7 @@ async def test_read_rows_single_key_query(table, temp_rows): """ Ensure that the read_rows method works with specified query """ - from google.cloud.bigtable import ReadRowsQuery + from google.cloud.bigtable.data import ReadRowsQuery await temp_rows.add_row(b"a") await temp_rows.add_row(b"b") @@ -786,8 +786,8 @@ async def test_read_rows_with_filter(table, temp_rows): """ ensure filters are applied """ - from google.cloud.bigtable import ReadRowsQuery - from google.cloud.bigtable.row_filters import ApplyLabelFilter + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter await temp_rows.add_row(b"a") await temp_rows.add_row(b"b") @@ -828,7 +828,7 @@ async def test_read_rows_stream_inactive_timer(table, temp_rows): """ Ensure that the read_rows_stream method works """ - from google.cloud.bigtable.exceptions import IdleTimeout + from google.cloud.bigtable.data.exceptions import IdleTimeout await temp_rows.add_row(b"row_key_1") await temp_rows.add_row(b"row_key_2") @@ -848,7 +848,7 @@ async def test_read_row(table, temp_rows): """ Test read_row (single row helper) """ - from google.cloud.bigtable import Row + from google.cloud.bigtable.data import Row await temp_rows.add_row(b"row_key_1", value=b"value") row = await table.read_row(b"row_key_1") @@ -877,8 +877,8 @@ async def test_read_row_w_filter(table, temp_rows): """ Test read_row (single row helper) """ - from google.cloud.bigtable import Row - from google.cloud.bigtable.row_filters import ApplyLabelFilter + from google.cloud.bigtable.data import Row + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter await temp_rows.add_row(b"row_key_1", value=b"value") expected_label = "test-label" @@ -943,8 +943,8 @@ async def test_literal_value_filter( Literal value filter does complex escaping on re2 strings. 
Make sure inputs are properly interpreted by the server """ - from google.cloud.bigtable.row_filters import LiteralValueFilter - from google.cloud.bigtable import ReadRowsQuery + from google.cloud.bigtable.data.row_filters import LiteralValueFilter + from google.cloud.bigtable.data import ReadRowsQuery f = LiteralValueFilter(filter_input) await temp_rows.add_row(b"row_key_1", value=cell_value) diff --git a/tests/system/v2_client/conftest.py b/tests/system/v2_client/conftest.py index bb4f54b41..f39fcba88 100644 --- a/tests/system/v2_client/conftest.py +++ b/tests/system/v2_client/conftest.py @@ -17,7 +17,7 @@ import pytest from test_utils.system import unique_resource_id -from google.cloud.bigtable.deprecated.client import Client +from google.cloud.bigtable.client import Client from google.cloud.environment_vars import BIGTABLE_EMULATOR from . import _helpers diff --git a/tests/system/v2_client/test_data_api.py b/tests/system/v2_client/test_data_api.py index 551a221ee..2ca7e1504 100644 --- a/tests/system/v2_client/test_data_api.py +++ b/tests/system/v2_client/test_data_api.py @@ -60,7 +60,7 @@ def rows_to_delete(): def test_table_read_rows_filter_millis(data_table): - from google.cloud.bigtable.deprecated import row_filters + from google.cloud.bigtable import row_filters end = datetime.datetime.now() start = end - datetime.timedelta(minutes=60) @@ -158,8 +158,8 @@ def test_table_drop_by_prefix(data_table, rows_to_delete): def test_table_read_rows_w_row_set(data_table, rows_to_delete): - from google.cloud.bigtable.deprecated.row_set import RowSet - from google.cloud.bigtable.deprecated.row_set import RowRange + from google.cloud.bigtable.row_set import RowSet + from google.cloud.bigtable.row_set import RowRange row_keys = [ b"row_key_1", @@ -189,7 +189,7 @@ def test_table_read_rows_w_row_set(data_table, rows_to_delete): def test_rowset_add_row_range_w_pfx(data_table, rows_to_delete): - from google.cloud.bigtable.deprecated.row_set import RowSet + from google.cloud.bigtable.row_set import RowSet row_keys = [ b"row_key_1", @@ -234,7 +234,7 @@ def _write_to_row(row1, row2, row3, row4): from google.cloud._helpers import _datetime_from_microseconds from google.cloud._helpers import _microseconds_from_datetime from google.cloud._helpers import UTC - from google.cloud.bigtable.deprecated.row_data import Cell + from google.cloud.bigtable.row_data import Cell timestamp1 = datetime.datetime.utcnow().replace(tzinfo=UTC) timestamp1_micros = _microseconds_from_datetime(timestamp1) @@ -290,7 +290,7 @@ def test_table_read_row(data_table, rows_to_delete): def test_table_read_rows(data_table, rows_to_delete): - from google.cloud.bigtable.deprecated.row_data import PartialRowData + from google.cloud.bigtable.row_data import PartialRowData row = data_table.direct_row(ROW_KEY) rows_to_delete.append(row) @@ -326,10 +326,10 @@ def test_table_read_rows(data_table, rows_to_delete): def test_read_with_label_applied(data_table, rows_to_delete, skip_on_emulator): - from google.cloud.bigtable.deprecated.row_filters import ApplyLabelFilter - from google.cloud.bigtable.deprecated.row_filters import ColumnQualifierRegexFilter - from google.cloud.bigtable.deprecated.row_filters import RowFilterChain - from google.cloud.bigtable.deprecated.row_filters import RowFilterUnion + from google.cloud.bigtable.row_filters import ApplyLabelFilter + from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter + from google.cloud.bigtable.row_filters import RowFilterChain + from google.cloud.bigtable.row_filters import 
RowFilterUnion row = data_table.direct_row(ROW_KEY) rows_to_delete.append(row) diff --git a/tests/system/v2_client/test_instance_admin.py b/tests/system/v2_client/test_instance_admin.py index debe1ab56..e5e311213 100644 --- a/tests/system/v2_client/test_instance_admin.py +++ b/tests/system/v2_client/test_instance_admin.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from google.cloud.bigtable.deprecated import enums -from google.cloud.bigtable.deprecated.table import ClusterState +from google.cloud.bigtable import enums +from google.cloud.bigtable.table import ClusterState from . import _helpers @@ -149,7 +149,7 @@ def test_instance_create_prod( instances_to_delete, skip_on_emulator, ): - from google.cloud.bigtable.deprecated import enums + from google.cloud.bigtable import enums alt_instance_id = f"ndef{unique_suffix}" instance = admin_client.instance(alt_instance_id, labels=instance_labels) diff --git a/tests/system/v2_client/test_table_admin.py b/tests/system/v2_client/test_table_admin.py index 107ed41bf..c50189013 100644 --- a/tests/system/v2_client/test_table_admin.py +++ b/tests/system/v2_client/test_table_admin.py @@ -97,7 +97,7 @@ def test_table_create_w_families( data_instance_populated, tables_to_delete, ): - from google.cloud.bigtable.deprecated.column_family import MaxVersionsGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule temp_table_id = "test-create-table-with-failies" column_family_id = "col-fam-id1" @@ -134,7 +134,7 @@ def test_table_create_w_split_keys( def test_column_family_create(data_instance_populated, tables_to_delete): - from google.cloud.bigtable.deprecated.column_family import MaxVersionsGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule temp_table_id = "test-create-column-family" temp_table = data_instance_populated.table(temp_table_id) @@ -158,7 +158,7 @@ def test_column_family_create(data_instance_populated, tables_to_delete): def test_column_family_update(data_instance_populated, tables_to_delete): - from google.cloud.bigtable.deprecated.column_family import MaxVersionsGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule temp_table_id = "test-update-column-family" temp_table = data_instance_populated.table(temp_table_id) @@ -219,8 +219,8 @@ def test_table_get_iam_policy( def test_table_set_iam_policy( service_account, data_instance_populated, tables_to_delete, skip_on_emulator ): - from google.cloud.bigtable.deprecated.policy import BIGTABLE_ADMIN_ROLE - from google.cloud.bigtable.deprecated.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + from google.cloud.bigtable.policy import Policy temp_table_id = "test-set-iam-policy-table" temp_table = data_instance_populated.table(temp_table_id) @@ -264,7 +264,7 @@ def test_table_backup( skip_on_emulator, ): from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable.deprecated import enums + from google.cloud.bigtable import enums temp_table_id = "test-backup-table" temp_table = data_instance_populated.table(temp_table_id) diff --git a/tests/unit/data/__init__.py b/tests/unit/data/__init__.py new file mode 100644 index 000000000..89a37dc92 --- /dev/null +++ b/tests/unit/data/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/tests/unit/test__mutate_rows.py b/tests/unit/data/_async/test__mutate_rows.py similarity index 93% rename from tests/unit/test__mutate_rows.py rename to tests/unit/data/_async/test__mutate_rows.py index 18b2beede..f77455d60 100644 --- a/tests/unit/test__mutate_rows.py +++ b/tests/unit/data/_async/test__mutate_rows.py @@ -36,9 +36,11 @@ def _make_mutation(count=1, size=1): class TestMutateRowsOperation: def _target_class(self): - from google.cloud.bigtable._mutate_rows import _MutateRowsOperation + from google.cloud.bigtable.data._async._mutate_rows import ( + _MutateRowsOperationAsync, + ) - return _MutateRowsOperation + return _MutateRowsOperationAsync def _make_one(self, *args, **kwargs): if not args: @@ -73,7 +75,7 @@ def test_ctor(self): """ test that constructor sets all the attributes correctly """ - from google.cloud.bigtable._mutate_rows import _MutateRowsIncomplete + from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete from google.api_core.exceptions import DeadlineExceeded from google.api_core.exceptions import ServiceUnavailable @@ -116,7 +118,7 @@ def test_ctor_too_many_entries(self): """ should raise an error if an operation is created with more than 100,000 entries """ - from google.cloud.bigtable._mutate_rows import ( + from google.cloud.bigtable.data._async._mutate_rows import ( MUTATE_ROWS_REQUEST_MUTATION_LIMIT, ) @@ -168,8 +170,8 @@ async def test_mutate_rows_exception(self, exc_type): """ exceptions raised from retryable should be raised in MutationsExceptionGroup """ - from google.cloud.bigtable.exceptions import MutationsExceptionGroup - from google.cloud.bigtable.exceptions import FailedMutationEntryError + from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup + from google.cloud.bigtable.data.exceptions import FailedMutationEntryError client = mock.Mock() table = mock.Mock() @@ -204,7 +206,9 @@ async def test_mutate_rows_exception_retryable_eventually_pass(self, exc_type): """ If an exception fails but eventually passes, it should not raise an exception """ - from google.cloud.bigtable._mutate_rows import _MutateRowsOperation + from google.cloud.bigtable.data._async._mutate_rows import ( + _MutateRowsOperationAsync, + ) client = mock.Mock() table = mock.Mock() @@ -213,7 +217,7 @@ async def test_mutate_rows_exception_retryable_eventually_pass(self, exc_type): expected_cause = exc_type("retry") num_retries = 2 with mock.patch.object( - _MutateRowsOperation, + _MutateRowsOperationAsync, "_run_attempt", AsyncMock(), ) as attempt_mock: @@ -229,8 +233,8 @@ async def test_mutate_rows_incomplete_ignored(self): """ MutateRowsIncomplete exceptions should not be added to error list """ - from google.cloud.bigtable._mutate_rows import _MutateRowsIncomplete - from google.cloud.bigtable.exceptions import MutationsExceptionGroup + from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete + from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup from google.api_core.exceptions import DeadlineExceeded client = mock.Mock() @@ -286,7 +290,7 @@ async def test_run_attempt_empty_request(self): 
@pytest.mark.asyncio async def test_run_attempt_partial_success_retryable(self): """Some entries succeed, but one fails. Should report the proper index, and raise incomplete exception""" - from google.cloud.bigtable._mutate_rows import _MutateRowsIncomplete + from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete success_mutation = _make_mutation() success_mutation_2 = _make_mutation() diff --git a/tests/unit/data/_async/test__read_rows.py b/tests/unit/data/_async/test__read_rows.py new file mode 100644 index 000000000..c7d52280c --- /dev/null +++ b/tests/unit/data/_async/test__read_rows.py @@ -0,0 +1,625 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +import sys +import asyncio + +from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # type: ignore +except ImportError: # pragma: NO COVER + import mock # type: ignore + from mock import AsyncMock # type: ignore # noqa F401 + +TEST_FAMILY = "family_name" +TEST_QUALIFIER = b"qualifier" +TEST_TIMESTAMP = 123456789 +TEST_LABELS = ["label1", "label2"] + + +class TestReadRowsOperation: + """ + Tests helper functions in the ReadRowsOperation class + in-depth merging logic in merge_row_response_stream and _read_rows_retryable_attempt + is tested in test_read_rows_acceptance test_client_read_rows, and conformance tests + """ + + @staticmethod + def _get_target_class(): + from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync + + return _ReadRowsOperationAsync + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + def test_ctor_defaults(self): + request = {} + client = mock.Mock() + client.read_rows = mock.Mock() + client.read_rows.return_value = None + default_operation_timeout = 600 + time_gen_mock = mock.Mock() + with mock.patch( + "google.cloud.bigtable.data._async._read_rows._attempt_timeout_generator", + time_gen_mock, + ): + instance = self._make_one(request, client) + assert time_gen_mock.call_count == 1 + time_gen_mock.assert_called_once_with(None, default_operation_timeout) + assert instance.transient_errors == [] + assert instance._last_emitted_row_key is None + assert instance._emit_count == 0 + assert instance.operation_timeout == default_operation_timeout + retryable_fn = instance._partial_retryable + assert retryable_fn.func == instance._read_rows_retryable_attempt + assert retryable_fn.args[0] == client.read_rows + assert retryable_fn.args[1] == time_gen_mock.return_value + assert retryable_fn.args[2] == 0 + assert client.read_rows.call_count == 0 + + def test_ctor(self): + row_limit = 91 + request = {"rows_limit": row_limit} + client = mock.Mock() + client.read_rows = mock.Mock() + client.read_rows.return_value = None + expected_operation_timeout = 42 + expected_request_timeout = 44 + time_gen_mock = mock.Mock() + with mock.patch( + 
"google.cloud.bigtable.data._async._read_rows._attempt_timeout_generator", + time_gen_mock, + ): + instance = self._make_one( + request, + client, + operation_timeout=expected_operation_timeout, + per_request_timeout=expected_request_timeout, + ) + assert time_gen_mock.call_count == 1 + time_gen_mock.assert_called_once_with( + expected_request_timeout, expected_operation_timeout + ) + assert instance.transient_errors == [] + assert instance._last_emitted_row_key is None + assert instance._emit_count == 0 + assert instance.operation_timeout == expected_operation_timeout + retryable_fn = instance._partial_retryable + assert retryable_fn.func == instance._read_rows_retryable_attempt + assert retryable_fn.args[0] == client.read_rows + assert retryable_fn.args[1] == time_gen_mock.return_value + assert retryable_fn.args[2] == row_limit + assert client.read_rows.call_count == 0 + + def test___aiter__(self): + request = {} + client = mock.Mock() + client.read_rows = mock.Mock() + instance = self._make_one(request, client) + assert instance.__aiter__() is instance + + @pytest.mark.asyncio + async def test_transient_error_capture(self): + from google.api_core import exceptions as core_exceptions + + client = mock.Mock() + client.read_rows = mock.Mock() + test_exc = core_exceptions.Aborted("test") + test_exc2 = core_exceptions.DeadlineExceeded("test") + client.read_rows.side_effect = [test_exc, test_exc2] + instance = self._make_one({}, client) + with pytest.raises(RuntimeError): + await instance.__anext__() + assert len(instance.transient_errors) == 2 + assert instance.transient_errors[0] == test_exc + assert instance.transient_errors[1] == test_exc2 + + @pytest.mark.parametrize( + "in_keys,last_key,expected", + [ + (["b", "c", "d"], "a", ["b", "c", "d"]), + (["a", "b", "c"], "b", ["c"]), + (["a", "b", "c"], "c", []), + (["a", "b", "c"], "d", []), + (["d", "c", "b", "a"], "b", ["d", "c"]), + ], + ) + def test_revise_request_rowset_keys(self, in_keys, last_key, expected): + sample_range = {"start_key_open": last_key} + row_set = {"row_keys": in_keys, "row_ranges": [sample_range]} + revised = self._get_target_class()._revise_request_rowset(row_set, last_key) + assert revised["row_keys"] == expected + assert revised["row_ranges"] == [sample_range] + + @pytest.mark.parametrize( + "in_ranges,last_key,expected", + [ + ( + [{"start_key_open": "b", "end_key_closed": "d"}], + "a", + [{"start_key_open": "b", "end_key_closed": "d"}], + ), + ( + [{"start_key_closed": "b", "end_key_closed": "d"}], + "a", + [{"start_key_closed": "b", "end_key_closed": "d"}], + ), + ( + [{"start_key_open": "a", "end_key_closed": "d"}], + "b", + [{"start_key_open": "b", "end_key_closed": "d"}], + ), + ( + [{"start_key_closed": "a", "end_key_open": "d"}], + "b", + [{"start_key_open": "b", "end_key_open": "d"}], + ), + ( + [{"start_key_closed": "b", "end_key_closed": "d"}], + "b", + [{"start_key_open": "b", "end_key_closed": "d"}], + ), + ([{"start_key_closed": "b", "end_key_closed": "d"}], "d", []), + ([{"start_key_closed": "b", "end_key_open": "d"}], "d", []), + ([{"start_key_closed": "b", "end_key_closed": "d"}], "e", []), + ([{"start_key_closed": "b"}], "z", [{"start_key_open": "z"}]), + ([{"start_key_closed": "b"}], "a", [{"start_key_closed": "b"}]), + ( + [{"end_key_closed": "z"}], + "a", + [{"start_key_open": "a", "end_key_closed": "z"}], + ), + ( + [{"end_key_open": "z"}], + "a", + [{"start_key_open": "a", "end_key_open": "z"}], + ), + ], + ) + def test_revise_request_rowset_ranges(self, in_ranges, last_key, expected): + 
next_key = last_key + "a" + row_set = {"row_keys": [next_key], "row_ranges": in_ranges} + revised = self._get_target_class()._revise_request_rowset(row_set, last_key) + assert revised["row_keys"] == [next_key] + assert revised["row_ranges"] == expected + + @pytest.mark.parametrize("last_key", ["a", "b", "c"]) + def test_revise_request_full_table(self, last_key): + row_set = {"row_keys": [], "row_ranges": []} + for selected_set in [row_set, None]: + revised = self._get_target_class()._revise_request_rowset( + selected_set, last_key + ) + assert revised["row_keys"] == [] + assert len(revised["row_ranges"]) == 1 + assert revised["row_ranges"][0]["start_key_open"] == last_key + + def test_revise_to_empty_rowset(self): + """revising to an empty rowset should raise error""" + from google.cloud.bigtable.data.exceptions import _RowSetComplete + + row_keys = ["a", "b", "c"] + row_set = {"row_keys": row_keys, "row_ranges": [{"end_key_open": "c"}]} + with pytest.raises(_RowSetComplete): + self._get_target_class()._revise_request_rowset(row_set, "d") + + @pytest.mark.parametrize( + "start_limit,emit_num,expected_limit", + [ + (10, 0, 10), + (10, 1, 9), + (10, 10, 0), + (0, 10, 0), + (0, 0, 0), + (4, 2, 2), + ], + ) + @pytest.mark.asyncio + async def test_revise_limit(self, start_limit, emit_num, expected_limit): + """ + revise_limit should revise the request's limit field + - if limit is 0 (unlimited), it should never be revised + - if start_limit-emit_num == 0, the request should end early + - if the number emitted exceeds the new limit, an exception should + should be raised (tested in test_revise_limit_over_limit) + """ + import itertools + + request = {"rows_limit": start_limit} + instance = self._make_one(request, mock.Mock()) + instance._emit_count = emit_num + instance._last_emitted_row_key = "a" + gapic_mock = mock.Mock() + gapic_mock.side_effect = [GeneratorExit("stop_fn")] + mock_timeout_gen = itertools.repeat(5) + + attempt = instance._read_rows_retryable_attempt( + gapic_mock, mock_timeout_gen, start_limit + ) + if start_limit != 0 and expected_limit == 0: + # if we emitted the expected number of rows, we should receive a StopAsyncIteration + with pytest.raises(StopAsyncIteration): + await attempt.__anext__() + else: + with pytest.raises(GeneratorExit): + await attempt.__anext__() + assert request["rows_limit"] == expected_limit + + @pytest.mark.parametrize("start_limit,emit_num", [(5, 10), (3, 9), (1, 10)]) + @pytest.mark.asyncio + async def test_revise_limit_over_limit(self, start_limit, emit_num): + """ + Should raise runtime error if we get in state where emit_num > start_num + (unless start_num == 0, which represents unlimited) + """ + import itertools + + request = {"rows_limit": start_limit} + instance = self._make_one(request, mock.Mock()) + instance._emit_count = emit_num + instance._last_emitted_row_key = "a" + mock_timeout_gen = itertools.repeat(5) + attempt = instance._read_rows_retryable_attempt( + mock.Mock(), mock_timeout_gen, start_limit + ) + with pytest.raises(RuntimeError) as e: + await attempt.__anext__() + assert "emit count exceeds row limit" in str(e.value) + + @pytest.mark.asyncio + async def test_aclose(self): + import asyncio + + instance = self._make_one({}, mock.Mock()) + await instance.aclose() + assert instance._stream is None + assert instance._last_emitted_row_key is None + with pytest.raises(asyncio.InvalidStateError): + await instance.__anext__() + # try calling a second time + await instance.aclose() + + @pytest.mark.parametrize("limit", [1, 3, 10]) + 
@pytest.mark.asyncio + async def test_retryable_attempt_hit_limit(self, limit): + """ + Stream should end after hitting the limit + """ + from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse + import itertools + + instance = self._make_one({}, mock.Mock()) + + async def mock_gapic(*args, **kwargs): + # continuously return a single row + async def gen(): + for i in range(limit * 2): + chunk = ReadRowsResponse.CellChunk( + row_key=str(i).encode(), + family_name="family_name", + qualifier=b"qualifier", + commit_row=True, + ) + yield ReadRowsResponse(chunks=[chunk]) + + return gen() + + mock_timeout_gen = itertools.repeat(5) + gen = instance._read_rows_retryable_attempt(mock_gapic, mock_timeout_gen, limit) + # should yield values up to the limit + for i in range(limit): + await gen.__anext__() + # next value should be StopAsyncIteration + with pytest.raises(StopAsyncIteration): + await gen.__anext__() + + @pytest.mark.asyncio + async def test_retryable_ignore_repeated_rows(self): + """ + Duplicate rows should cause an invalid chunk error + """ + from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync + from google.cloud.bigtable.data.row import Row + from google.cloud.bigtable.data.exceptions import InvalidChunk + + async def mock_stream(): + while True: + yield Row(b"dup_key", cells=[]) + yield Row(b"dup_key", cells=[]) + + with mock.patch.object( + _ReadRowsOperationAsync, "merge_row_response_stream" + ) as mock_stream_fn: + mock_stream_fn.return_value = mock_stream() + instance = self._make_one({}, mock.AsyncMock()) + first_row = await instance.__anext__() + assert first_row.row_key == b"dup_key" + with pytest.raises(InvalidChunk) as exc: + await instance.__anext__() + assert "Last emitted row key out of order" in str(exc.value) + + @pytest.mark.asyncio + async def test_retryable_ignore_last_scanned_rows(self): + """ + Last scanned rows should not be emitted + """ + from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync + from google.cloud.bigtable.data.row import Row, _LastScannedRow + + async def mock_stream(): + while True: + yield Row(b"key1", cells=[]) + yield _LastScannedRow(b"key2_ignored") + yield Row(b"key3", cells=[]) + + with mock.patch.object( + _ReadRowsOperationAsync, "merge_row_response_stream" + ) as mock_stream_fn: + mock_stream_fn.return_value = mock_stream() + instance = self._make_one({}, mock.AsyncMock()) + first_row = await instance.__anext__() + assert first_row.row_key == b"key1" + second_row = await instance.__anext__() + assert second_row.row_key == b"key3" + + @pytest.mark.asyncio + async def test_retryable_cancel_on_close(self): + """Underlying gapic call should be cancelled when stream is closed""" + from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync + from google.cloud.bigtable.data.row import Row + + async def mock_stream(): + while True: + yield Row(b"key1", cells=[]) + + with mock.patch.object( + _ReadRowsOperationAsync, "merge_row_response_stream" + ) as mock_stream_fn: + mock_stream_fn.return_value = mock_stream() + mock_gapic = mock.AsyncMock() + mock_call = await mock_gapic.read_rows() + instance = self._make_one({}, mock_gapic) + await instance.__anext__() + assert mock_call.cancel.call_count == 0 + await instance.aclose() + assert mock_call.cancel.call_count == 1 + + +class MockStream(_ReadRowsOperationAsync): + """ + Mock a _ReadRowsOperationAsync stream for testing + """ + + def __init__(self, items=None, errors=None, operation_timeout=None): + 
self.transient_errors = errors + self.operation_timeout = operation_timeout + self.next_idx = 0 + if items is None: + items = list(range(10)) + self.items = items + + def __aiter__(self): + return self + + async def __anext__(self): + if self.next_idx >= len(self.items): + raise StopAsyncIteration + item = self.items[self.next_idx] + self.next_idx += 1 + if isinstance(item, Exception): + raise item + return item + + async def aclose(self): + pass + + +class TestReadRowsAsyncIterator: + async def mock_stream(self, size=10): + for i in range(size): + yield i + + def _make_one(self, *args, **kwargs): + from google.cloud.bigtable.data._async._read_rows import ReadRowsAsyncIterator + + stream = MockStream(*args, **kwargs) + return ReadRowsAsyncIterator(stream) + + def test_ctor(self): + with mock.patch("time.monotonic", return_value=0): + iterator = self._make_one() + assert iterator._last_interaction_time == 0 + assert iterator._idle_timeout_task is None + assert iterator.active is True + + def test___aiter__(self): + iterator = self._make_one() + assert iterator.__aiter__() is iterator + + @pytest.mark.skipif( + sys.version_info < (3, 8), reason="mock coroutine requires python3.8 or higher" + ) + @pytest.mark.asyncio + async def test__start_idle_timer(self): + """Should start timer coroutine""" + iterator = self._make_one() + expected_timeout = 10 + with mock.patch("time.monotonic", return_value=1): + with mock.patch.object(iterator, "_idle_timeout_coroutine") as mock_coro: + await iterator._start_idle_timer(expected_timeout) + assert mock_coro.call_count == 1 + assert mock_coro.call_args[0] == (expected_timeout,) + assert iterator._last_interaction_time == 1 + assert iterator._idle_timeout_task is not None + + @pytest.mark.skipif( + sys.version_info < (3, 8), reason="mock coroutine requires python3.8 or higher" + ) + @pytest.mark.asyncio + async def test__start_idle_timer_duplicate(self): + """Multiple calls should replace task""" + iterator = self._make_one() + with mock.patch.object(iterator, "_idle_timeout_coroutine") as mock_coro: + await iterator._start_idle_timer(1) + first_task = iterator._idle_timeout_task + await iterator._start_idle_timer(2) + second_task = iterator._idle_timeout_task + assert mock_coro.call_count == 2 + + assert first_task is not None + assert first_task != second_task + # old tasks hould be cancelled + with pytest.raises(asyncio.CancelledError): + await first_task + # new task should not be cancelled + await second_task + + @pytest.mark.asyncio + async def test__idle_timeout_coroutine(self): + from google.cloud.bigtable.data.exceptions import IdleTimeout + + iterator = self._make_one() + await iterator._idle_timeout_coroutine(0.05) + await asyncio.sleep(0.1) + assert iterator.active is False + with pytest.raises(IdleTimeout): + await iterator.__anext__() + + @pytest.mark.asyncio + async def test__idle_timeout_coroutine_extensions(self): + """touching the generator should reset the idle timer""" + iterator = self._make_one(items=list(range(100))) + await iterator._start_idle_timer(0.05) + for i in range(10): + # will not expire as long as it is in use + assert iterator.active is True + await iterator.__anext__() + await asyncio.sleep(0.03) + # now let it expire + await asyncio.sleep(0.5) + assert iterator.active is False + + @pytest.mark.asyncio + async def test___anext__(self): + num_rows = 10 + iterator = self._make_one(items=list(range(num_rows))) + for i in range(num_rows): + assert await iterator.__anext__() == i + with pytest.raises(StopAsyncIteration): + 
await iterator.__anext__() + + @pytest.mark.asyncio + async def test___anext__with_deadline_error(self): + """ + RetryErrors mean a deadline has been hit. + Should be wrapped in a DeadlineExceeded exception + """ + from google.api_core import exceptions as core_exceptions + + items = [1, core_exceptions.RetryError("retry error", None)] + expected_timeout = 99 + iterator = self._make_one(items=items, operation_timeout=expected_timeout) + assert await iterator.__anext__() == 1 + with pytest.raises(core_exceptions.DeadlineExceeded) as exc: + await iterator.__anext__() + assert f"operation_timeout of {expected_timeout:0.1f}s exceeded" in str( + exc.value + ) + assert exc.value.__cause__ is None + + @pytest.mark.asyncio + async def test___anext__with_deadline_error_with_cause(self): + """ + Transient errors should be exposed as an error group + """ + from google.api_core import exceptions as core_exceptions + from google.cloud.bigtable.data.exceptions import RetryExceptionGroup + + items = [1, core_exceptions.RetryError("retry error", None)] + expected_timeout = 99 + errors = [RuntimeError("error1"), ValueError("error2")] + iterator = self._make_one( + items=items, operation_timeout=expected_timeout, errors=errors + ) + assert await iterator.__anext__() == 1 + with pytest.raises(core_exceptions.DeadlineExceeded) as exc: + await iterator.__anext__() + assert f"operation_timeout of {expected_timeout:0.1f}s exceeded" in str( + exc.value + ) + error_group = exc.value.__cause__ + assert isinstance(error_group, RetryExceptionGroup) + assert len(error_group.exceptions) == 2 + assert error_group.exceptions[0] is errors[0] + assert error_group.exceptions[1] is errors[1] + assert "2 failed attempts" in str(error_group) + + @pytest.mark.asyncio + async def test___anext__with_error(self): + """ + Other errors should be raised as-is + """ + from google.api_core import exceptions as core_exceptions + + items = [1, core_exceptions.InternalServerError("mock error")] + iterator = self._make_one(items=items) + assert await iterator.__anext__() == 1 + with pytest.raises(core_exceptions.InternalServerError) as exc: + await iterator.__anext__() + assert exc.value is items[1] + assert iterator.active is False + # next call should raise same error + with pytest.raises(core_exceptions.InternalServerError) as exc: + await iterator.__anext__() + + @pytest.mark.asyncio + async def test__finish_with_error(self): + iterator = self._make_one() + await iterator._start_idle_timer(10) + timeout_task = iterator._idle_timeout_task + assert await iterator.__anext__() == 0 + assert iterator.active is True + err = ZeroDivisionError("mock error") + await iterator._finish_with_error(err) + assert iterator.active is False + assert iterator._error is err + assert iterator._idle_timeout_task is None + with pytest.raises(ZeroDivisionError) as exc: + await iterator.__anext__() + assert exc.value is err + # timeout task should be cancelled + with pytest.raises(asyncio.CancelledError): + await timeout_task + + @pytest.mark.asyncio + async def test_aclose(self): + iterator = self._make_one() + await iterator._start_idle_timer(10) + timeout_task = iterator._idle_timeout_task + assert await iterator.__anext__() == 0 + assert iterator.active is True + await iterator.aclose() + assert iterator.active is False + assert isinstance(iterator._error, StopAsyncIteration) + assert iterator._idle_timeout_task is None + with pytest.raises(StopAsyncIteration) as e: + await iterator.__anext__() + assert "closed" in str(e.value) + # timeout task should be 
cancelled + with pytest.raises(asyncio.CancelledError): + await timeout_task diff --git a/tests/unit/test_client.py b/tests/unit/data/_async/test_client.py similarity index 95% rename from tests/unit/test_client.py rename to tests/unit/data/_async/test_client.py index 3557c1c16..25006d725 100644 --- a/tests/unit/test_client.py +++ b/tests/unit/data/_async/test_client.py @@ -20,15 +20,15 @@ import pytest -from google.cloud.bigtable import mutations +from google.cloud.bigtable.data import mutations from google.auth.credentials import AnonymousCredentials from google.cloud.bigtable_v2.types import ReadRowsResponse -from google.cloud.bigtable.read_rows_query import ReadRowsQuery +from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery from google.api_core import exceptions as core_exceptions -from google.cloud.bigtable.exceptions import InvalidChunk +from google.cloud.bigtable.data.exceptions import InvalidChunk -from google.cloud.bigtable.read_modify_write_rules import IncrementRule -from google.cloud.bigtable.read_modify_write_rules import AppendValueRule +from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule +from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule # try/except added for compatibility with python < 3.8 try: @@ -43,11 +43,11 @@ ) -class TestBigtableDataClient: +class TestBigtableDataClientAsync: def _get_target_class(self): - from google.cloud.bigtable.client import BigtableDataClient + from google.cloud.bigtable.data._async.client import BigtableDataClientAsync - return BigtableDataClient + return BigtableDataClientAsync def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) @@ -118,7 +118,6 @@ async def test_ctor_dict_options(self): BigtableAsyncClient, ) from google.api_core.client_options import ClientOptions - from google.cloud.bigtable.client import BigtableDataClient client_options = {"api_endpoint": "foo.bar:1234"} with mock.patch.object(BigtableAsyncClient, "__init__") as bigtable_client_init: @@ -132,7 +131,7 @@ async def test_ctor_dict_options(self): assert called_options.api_endpoint == "foo.bar:1234" assert isinstance(called_options, ClientOptions) with mock.patch.object( - BigtableDataClient, "start_background_channel_refresh" + self._get_target_class(), "start_background_channel_refresh" ) as start_background_refresh: client = self._make_one(client_options=client_options) start_background_refresh.assert_called_once() @@ -275,7 +274,7 @@ async def test_start_background_channel_refresh_tasks_names(self): for i in range(pool_size): name = client._channel_refresh_tasks[i].get_name() assert str(i) in name - assert "BigtableDataClient channel refresh " in name + assert "BigtableDataClientAsync channel refresh " in name await client.close() @pytest.mark.asyncio @@ -725,7 +724,7 @@ async def test__multiple_table_registration(self): add multiple owners to instance_owners, but only keep one copy of shared key in active_instances """ - from google.cloud.bigtable.client import _WarmedInstanceKey + from google.cloud.bigtable.data._async.client import _WarmedInstanceKey async with self._make_one(project="project-id") as client: async with client.get_table("instance_1", "table_1") as table_1: @@ -773,7 +772,7 @@ async def test__multiple_instance_registration(self): registering with multiple instance keys should update the key in instance_owners and active_instances """ - from google.cloud.bigtable.client import _WarmedInstanceKey + from google.cloud.bigtable.data._async.client import 
_WarmedInstanceKey async with self._make_one(project="project-id") as client: async with client.get_table("instance_1", "table_1") as table_1: @@ -808,8 +807,8 @@ async def test__multiple_instance_registration(self): @pytest.mark.asyncio async def test_get_table(self): - from google.cloud.bigtable.client import Table - from google.cloud.bigtable.client import _WarmedInstanceKey + from google.cloud.bigtable.data._async.client import TableAsync + from google.cloud.bigtable.data._async.client import _WarmedInstanceKey client = self._make_one(project="project-id") assert not client._active_instances @@ -822,7 +821,7 @@ async def test_get_table(self): expected_app_profile_id, ) await asyncio.sleep(0) - assert isinstance(table, Table) + assert isinstance(table, TableAsync) assert table.table_id == expected_table_id assert ( table.table_name @@ -844,15 +843,15 @@ async def test_get_table(self): @pytest.mark.asyncio async def test_get_table_context_manager(self): - from google.cloud.bigtable.client import Table - from google.cloud.bigtable.client import _WarmedInstanceKey + from google.cloud.bigtable.data._async.client import TableAsync + from google.cloud.bigtable.data._async.client import _WarmedInstanceKey expected_table_id = "table-id" expected_instance_id = "instance-id" expected_app_profile_id = "app-profile-id" expected_project_id = "project-id" - with mock.patch.object(Table, "close") as close_mock: + with mock.patch.object(TableAsync, "close") as close_mock: async with self._make_one(project=expected_project_id) as client: async with client.get_table( expected_instance_id, @@ -860,7 +859,7 @@ async def test_get_table_context_manager(self): expected_app_profile_id, ) as table: await asyncio.sleep(0) - assert isinstance(table, Table) + assert isinstance(table, TableAsync) assert table.table_id == expected_table_id assert ( table.table_name @@ -950,35 +949,36 @@ async def test_context_manager(self): def test_client_ctor_sync(self): # initializing client in a sync context should raise RuntimeError - from google.cloud.bigtable.client import BigtableDataClient + from google.cloud.bigtable.data._async.client import BigtableDataClientAsync with pytest.warns(RuntimeWarning) as warnings: - client = BigtableDataClient(project="project-id") + client = BigtableDataClientAsync(project="project-id") expected_warning = [w for w in warnings if "client.py" in w.filename] assert len(expected_warning) == 1 - assert "BigtableDataClient should be started in an asyncio event loop." in str( - expected_warning[0].message + assert ( + "BigtableDataClientAsync should be started in an asyncio event loop." 
+ in str(expected_warning[0].message) ) assert client.project == "project-id" assert client._channel_refresh_tasks == [] -class TestTable: +class TestTableAsync: @pytest.mark.asyncio async def test_table_ctor(self): - from google.cloud.bigtable.client import BigtableDataClient - from google.cloud.bigtable.client import Table - from google.cloud.bigtable.client import _WarmedInstanceKey + from google.cloud.bigtable.data._async.client import BigtableDataClientAsync + from google.cloud.bigtable.data._async.client import TableAsync + from google.cloud.bigtable.data._async.client import _WarmedInstanceKey expected_table_id = "table-id" expected_instance_id = "instance-id" expected_app_profile_id = "app-profile-id" expected_operation_timeout = 123 expected_per_request_timeout = 12 - client = BigtableDataClient() + client = BigtableDataClientAsync() assert not client._active_instances - table = Table( + table = TableAsync( client, expected_instance_id, expected_table_id, @@ -1007,19 +1007,19 @@ async def test_table_ctor(self): @pytest.mark.asyncio async def test_table_ctor_bad_timeout_values(self): - from google.cloud.bigtable.client import BigtableDataClient - from google.cloud.bigtable.client import Table + from google.cloud.bigtable.data._async.client import BigtableDataClientAsync + from google.cloud.bigtable.data._async.client import TableAsync - client = BigtableDataClient() + client = BigtableDataClientAsync() with pytest.raises(ValueError) as e: - Table(client, "", "", default_per_request_timeout=-1) + TableAsync(client, "", "", default_per_request_timeout=-1) assert "default_per_request_timeout must be greater than 0" in str(e.value) with pytest.raises(ValueError) as e: - Table(client, "", "", default_operation_timeout=-1) + TableAsync(client, "", "", default_operation_timeout=-1) assert "default_operation_timeout must be greater than 0" in str(e.value) with pytest.raises(ValueError) as e: - Table( + TableAsync( client, "", "", @@ -1034,12 +1034,12 @@ async def test_table_ctor_bad_timeout_values(self): def test_table_ctor_sync(self): # initializing client in a sync context should raise RuntimeError - from google.cloud.bigtable.client import Table + from google.cloud.bigtable.data._async.client import TableAsync client = mock.Mock() with pytest.raises(RuntimeError) as e: - Table(client, "instance-id", "table-id") - assert e.match("Table must be created within an async event loop context.") + TableAsync(client, "instance-id", "table-id") + assert e.match("TableAsync must be created within an async event loop context.") class TestReadRows: @@ -1048,12 +1048,12 @@ class TestReadRows: """ def _make_client(self, *args, **kwargs): - from google.cloud.bigtable.client import BigtableDataClient + from google.cloud.bigtable.data._async.client import BigtableDataClientAsync - return BigtableDataClient(*args, **kwargs) + return BigtableDataClientAsync(*args, **kwargs) def _make_table(self, *args, **kwargs): - from google.cloud.bigtable.client import Table + from google.cloud.bigtable.data._async.client import TableAsync client_mock = mock.Mock() client_mock._register_instance.side_effect = ( @@ -1070,7 +1070,7 @@ def _make_table(self, *args, **kwargs): ) client_mock._gapic_client.table_path.return_value = kwargs["table_id"] client_mock._gapic_client.instance_path.return_value = kwargs["instance_id"] - return Table(client_mock, *args, **kwargs) + return TableAsync(client_mock, *args, **kwargs) def _make_stats(self): from google.cloud.bigtable_v2.types import RequestStats @@ -1174,7 +1174,7 @@ async 
def test_read_rows_stream(self): @pytest.mark.parametrize("include_app_profile", [True, False]) @pytest.mark.asyncio async def test_read_rows_query_matches_request(self, include_app_profile): - from google.cloud.bigtable import RowRange + from google.cloud.bigtable.data import RowRange app_profile_id = "app_profile_id" if include_app_profile else None async with self._make_table(app_profile_id=app_profile_id) as table: @@ -1250,7 +1250,7 @@ async def test_read_rows_per_request_timeout( operation_timeout does not cancel the request, so we expect the number of requests to be the ceiling of operation_timeout / per_request_timeout. """ - from google.cloud.bigtable.exceptions import RetryExceptionGroup + from google.cloud.bigtable.data.exceptions import RetryExceptionGroup expected_last_timeout = operation_t - (expected_num - 1) * per_request_t @@ -1295,12 +1295,12 @@ async def test_read_rows_per_request_timeout( @pytest.mark.asyncio async def test_read_rows_idle_timeout(self): - from google.cloud.bigtable.client import ReadRowsIterator + from google.cloud.bigtable.data._async.client import ReadRowsAsyncIterator from google.cloud.bigtable_v2.services.bigtable.async_client import ( BigtableAsyncClient, ) - from google.cloud.bigtable.exceptions import IdleTimeout - from google.cloud.bigtable._read_rows import _ReadRowsOperation + from google.cloud.bigtable.data.exceptions import IdleTimeout + from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync chunks = [ self._make_chunk(row_key=b"test_1"), @@ -1311,7 +1311,7 @@ async def test_read_rows_idle_timeout(self): chunks ) with mock.patch.object( - ReadRowsIterator, "_start_idle_timer" + ReadRowsAsyncIterator, "_start_idle_timer" ) as start_idle_timer: client = self._make_client() table = client.get_table("instance", "table") @@ -1319,7 +1319,9 @@ async def test_read_rows_idle_timeout(self): gen = await table.read_rows_stream(query) # should start idle timer on creation start_idle_timer.assert_called_once() - with mock.patch.object(_ReadRowsOperation, "aclose", AsyncMock()) as aclose: + with mock.patch.object( + _ReadRowsOperationAsync, "aclose", AsyncMock() + ) as aclose: # start idle timer with our own value await gen._start_idle_timer(0.1) # should timeout after being abandoned @@ -1398,13 +1400,13 @@ async def test_read_rows_revise_request(self): """ Ensure that _revise_request is called between retries """ - from google.cloud.bigtable._read_rows import _ReadRowsOperation - from google.cloud.bigtable.exceptions import InvalidChunk + from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync + from google.cloud.bigtable.data.exceptions import InvalidChunk with mock.patch.object( - _ReadRowsOperation, "_revise_request_rowset" + _ReadRowsOperationAsync, "_revise_request_rowset" ) as revise_rowset: - with mock.patch.object(_ReadRowsOperation, "aclose"): + with mock.patch.object(_ReadRowsOperationAsync, "aclose"): revise_rowset.return_value = "modified" async with self._make_table() as table: read_rows = table.client._gapic_client.read_rows @@ -1432,11 +1434,11 @@ async def test_read_rows_default_timeouts(self): """ Ensure that the default timeouts are set on the read rows operation when not overridden """ - from google.cloud.bigtable._read_rows import _ReadRowsOperation + from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync operation_timeout = 8 per_request_timeout = 4 - with mock.patch.object(_ReadRowsOperation, "__init__") as mock_op: + with 
mock.patch.object(_ReadRowsOperationAsync, "__init__") as mock_op: mock_op.side_effect = RuntimeError("mock error") async with self._make_table( default_operation_timeout=operation_timeout, @@ -1455,11 +1457,11 @@ async def test_read_rows_default_timeout_override(self): """ When timeouts are passed, they overwrite default values """ - from google.cloud.bigtable._read_rows import _ReadRowsOperation + from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync operation_timeout = 8 per_request_timeout = 4 - with mock.patch.object(_ReadRowsOperation, "__init__") as mock_op: + with mock.patch.object(_ReadRowsOperationAsync, "__init__") as mock_op: mock_op.side_effect = RuntimeError("mock error") async with self._make_table( default_operation_timeout=99, default_per_request_timeout=97 @@ -1653,9 +1655,9 @@ async def test_read_rows_metadata(self, include_app_profile): class TestReadRowsSharded: def _make_client(self, *args, **kwargs): - from google.cloud.bigtable.client import BigtableDataClient + from google.cloud.bigtable.data._async.client import BigtableDataClientAsync - return BigtableDataClient(*args, **kwargs) + return BigtableDataClientAsync(*args, **kwargs) @pytest.mark.asyncio async def test_read_rows_sharded_empty_query(self): @@ -1708,8 +1710,8 @@ async def test_read_rows_sharded_errors(self): """ Errors should be exposed as ShardedReadRowsExceptionGroups """ - from google.cloud.bigtable.exceptions import ShardedReadRowsExceptionGroup - from google.cloud.bigtable.exceptions import FailedQueryShardError + from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + from google.cloud.bigtable.data.exceptions import FailedQueryShardError async with self._make_client() as client: async with client.get_table("instance", "table") as table: @@ -1785,8 +1787,8 @@ async def test_read_rows_sharded_batching(self): Large queries should be processed in batches to limit concurrency operation timeout should change between batches """ - from google.cloud.bigtable.client import Table - from google.cloud.bigtable.client import CONCURRENCY_LIMIT + from google.cloud.bigtable.data._async.client import TableAsync + from google.cloud.bigtable.data._async.client import CONCURRENCY_LIMIT assert CONCURRENCY_LIMIT == 10 # change this test if this changes @@ -1802,7 +1804,7 @@ async def test_read_rows_sharded_batching(self): # clock ticks one second on each check with mock.patch("time.monotonic", side_effect=range(0, 100000)): with mock.patch("asyncio.gather", AsyncMock()) as gather_mock: - await Table.read_rows_sharded(table_mock, query_list) + await TableAsync.read_rows_sharded(table_mock, query_list) # should have individual calls for each query assert table_mock.read_rows.call_count == n_queries # should have single gather call for each batch @@ -1843,9 +1845,9 @@ async def test_read_rows_sharded_batching(self): class TestSampleRowKeys: def _make_client(self, *args, **kwargs): - from google.cloud.bigtable.client import BigtableDataClient + from google.cloud.bigtable.data._async.client import BigtableDataClientAsync - return BigtableDataClient(*args, **kwargs) + return BigtableDataClientAsync(*args, **kwargs) async def _make_gapic_stream(self, sample_list: list[tuple[bytes, int]]): from google.cloud.bigtable_v2.types import SampleRowKeysResponse @@ -1980,7 +1982,7 @@ async def test_sample_row_keys_retryable_errors(self, retryable_exception): retryable errors should be retried until timeout """ from google.api_core.exceptions import DeadlineExceeded - from 
google.cloud.bigtable.exceptions import RetryExceptionGroup + from google.cloud.bigtable.data.exceptions import RetryExceptionGroup async with self._make_client() as client: async with client.get_table("instance", "table") as table: @@ -2023,9 +2025,9 @@ async def test_sample_row_keys_non_retryable_errors(self, non_retryable_exceptio class TestMutateRow: def _make_client(self, *args, **kwargs): - from google.cloud.bigtable.client import BigtableDataClient + from google.cloud.bigtable.data._async.client import BigtableDataClientAsync - return BigtableDataClient(*args, **kwargs) + return BigtableDataClientAsync(*args, **kwargs) @pytest.mark.asyncio @pytest.mark.parametrize( @@ -2085,7 +2087,7 @@ async def test_mutate_row(self, mutation_arg): @pytest.mark.asyncio async def test_mutate_row_retryable_errors(self, retryable_exception): from google.api_core.exceptions import DeadlineExceeded - from google.cloud.bigtable.exceptions import RetryExceptionGroup + from google.cloud.bigtable.data.exceptions import RetryExceptionGroup async with self._make_client(project="project") as client: async with client.get_table("instance", "table") as table: @@ -2190,9 +2192,9 @@ async def test_mutate_row_metadata(self, include_app_profile): class TestBulkMutateRows: def _make_client(self, *args, **kwargs): - from google.cloud.bigtable.client import BigtableDataClient + from google.cloud.bigtable.data._async.client import BigtableDataClientAsync - return BigtableDataClient(*args, **kwargs) + return BigtableDataClientAsync(*args, **kwargs) async def _mock_response(self, response_list): from google.cloud.bigtable_v2.types import MutateRowsResponse @@ -2300,7 +2302,7 @@ async def test_bulk_mutate_rows_idempotent_mutation_error_retryable( """ Individual idempotent mutations should be retried if they fail with a retryable error """ - from google.cloud.bigtable.exceptions import ( + from google.cloud.bigtable.data.exceptions import ( RetryExceptionGroup, FailedMutationEntryError, MutationsExceptionGroup, @@ -2347,7 +2349,7 @@ async def test_bulk_mutate_rows_idempotent_mutation_error_non_retryable( """ Individual idempotent mutations should not be retried if they fail with a non-retryable error """ - from google.cloud.bigtable.exceptions import ( + from google.cloud.bigtable.data.exceptions import ( FailedMutationEntryError, MutationsExceptionGroup, ) @@ -2386,7 +2388,7 @@ async def test_bulk_mutate_idempotent_retryable_request_errors( """ Individual idempotent mutations should be retried if the request fails with a retryable error """ - from google.cloud.bigtable.exceptions import ( + from google.cloud.bigtable.data.exceptions import ( RetryExceptionGroup, FailedMutationEntryError, MutationsExceptionGroup, @@ -2425,7 +2427,7 @@ async def test_bulk_mutate_rows_non_idempotent_retryable_errors( self, retryable_exception ): """Non-Idempotent mutations should never be retried""" - from google.cloud.bigtable.exceptions import ( + from google.cloud.bigtable.data.exceptions import ( FailedMutationEntryError, MutationsExceptionGroup, ) @@ -2467,7 +2469,7 @@ async def test_bulk_mutate_rows_non_retryable_errors(self, non_retryable_excepti """ If the request fails with a non-retryable error, mutations should not be retried """ - from google.cloud.bigtable.exceptions import ( + from google.cloud.bigtable.data.exceptions import ( FailedMutationEntryError, MutationsExceptionGroup, ) @@ -2502,7 +2504,7 @@ async def test_bulk_mutate_error_index(self): ServiceUnavailable, FailedPrecondition, ) - from google.cloud.bigtable.exceptions 
import ( + from google.cloud.bigtable.data.exceptions import ( RetryExceptionGroup, FailedMutationEntryError, MutationsExceptionGroup, @@ -2579,9 +2581,9 @@ async def test_bulk_mutate_row_metadata(self, include_app_profile): class TestCheckAndMutateRow: def _make_client(self, *args, **kwargs): - from google.cloud.bigtable.client import BigtableDataClient + from google.cloud.bigtable.data._async.client import BigtableDataClientAsync - return BigtableDataClient(*args, **kwargs) + return BigtableDataClientAsync(*args, **kwargs) @pytest.mark.parametrize("gapic_result", [True, False]) @pytest.mark.asyncio @@ -2660,7 +2662,7 @@ async def test_check_and_mutate_no_mutations(self): @pytest.mark.asyncio async def test_check_and_mutate_single_mutations(self): """if single mutations are passed, they should be internally wrapped in a list""" - from google.cloud.bigtable.mutations import SetCell + from google.cloud.bigtable.data.mutations import SetCell from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse async with self._make_client() as client: @@ -2713,7 +2715,7 @@ async def test_check_and_mutate_predicate_object(self): async def test_check_and_mutate_mutations_parsing(self): """mutations objects should be converted to dicts""" from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse - from google.cloud.bigtable.mutations import DeleteAllFromRow + from google.cloud.bigtable.data.mutations import DeleteAllFromRow mutations = [mock.Mock() for _ in range(5)] for idx, mutation in enumerate(mutations): @@ -2772,9 +2774,9 @@ async def test_check_and_mutate_metadata(self, include_app_profile): class TestReadModifyWriteRow: def _make_client(self, *args, **kwargs): - from google.cloud.bigtable.client import BigtableDataClient + from google.cloud.bigtable.data._async.client import BigtableDataClientAsync - return BigtableDataClient(*args, **kwargs) + return BigtableDataClientAsync(*args, **kwargs) @pytest.mark.parametrize( "call_rules,expected_rules", @@ -2886,7 +2888,7 @@ async def test_read_modify_write_row_building(self): """ results from gapic call should be used to construct row """ - from google.cloud.bigtable.row import Row + from google.cloud.bigtable.data.row import Row from google.cloud.bigtable_v2.types import ReadModifyWriteRowResponse from google.cloud.bigtable_v2.types import Row as RowPB diff --git a/tests/unit/test_mutations_batcher.py b/tests/unit/data/_async/test_mutations_batcher.py similarity index 94% rename from tests/unit/test_mutations_batcher.py rename to tests/unit/data/_async/test_mutations_batcher.py index a900468d5..1b14cc128 100644 --- a/tests/unit/test_mutations_batcher.py +++ b/tests/unit/data/_async/test_mutations_batcher.py @@ -33,9 +33,11 @@ def _make_mutation(count=1, size=1): class Test_FlowControl: def _make_one(self, max_mutation_count=10, max_mutation_bytes=100): - from google.cloud.bigtable.mutations_batcher import _FlowControl + from google.cloud.bigtable.data._async.mutations_batcher import ( + _FlowControlAsync, + ) - return _FlowControl(max_mutation_count, max_mutation_bytes) + return _FlowControlAsync(max_mutation_count, max_mutation_bytes) def test_ctor(self): max_mutation_count = 9 @@ -238,7 +240,7 @@ async def test_add_to_flow_max_mutation_limits( Should submit request early, even if the flow control has room for more """ with mock.patch( - "google.cloud.bigtable.mutations_batcher.MUTATE_ROWS_REQUEST_MUTATION_LIMIT", + "google.cloud.bigtable.data._async.mutations_batcher.MUTATE_ROWS_REQUEST_MUTATION_LIMIT", max_limit, ): mutation_objs 
= [_make_mutation(count=m[0], size=m[1]) for m in mutations] @@ -275,11 +277,13 @@ async def test_add_to_flow_oversize(self): assert len(count_results) == 1 -class TestMutationsBatcher: +class TestMutationsBatcherAsync: def _get_target_class(self): - from google.cloud.bigtable.mutations_batcher import MutationsBatcher + from google.cloud.bigtable.data._async.mutations_batcher import ( + MutationsBatcherAsync, + ) - return MutationsBatcher + return MutationsBatcherAsync def _make_one(self, table=None, **kwargs): if table is None: @@ -290,7 +294,7 @@ def _make_one(self, table=None, **kwargs): return self._get_target_class()(table, **kwargs) @mock.patch( - "google.cloud.bigtable.mutations_batcher.MutationsBatcher._start_flush_timer" + "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._start_flush_timer" ) @pytest.mark.asyncio async def test_ctor_defaults(self, flush_timer_mock): @@ -320,7 +324,7 @@ async def test_ctor_defaults(self, flush_timer_mock): assert isinstance(instance._flush_timer, asyncio.Future) @mock.patch( - "google.cloud.bigtable.mutations_batcher.MutationsBatcher._start_flush_timer", + "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._start_flush_timer", ) @pytest.mark.asyncio async def test_ctor_explicit(self, flush_timer_mock): @@ -368,7 +372,7 @@ async def test_ctor_explicit(self, flush_timer_mock): assert isinstance(instance._flush_timer, asyncio.Future) @mock.patch( - "google.cloud.bigtable.mutations_batcher.MutationsBatcher._start_flush_timer" + "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._start_flush_timer" ) @pytest.mark.asyncio async def test_ctor_no_flush_limits(self, flush_timer_mock): @@ -419,19 +423,23 @@ async def test_ctor_invalid_values(self): def test_default_argument_consistency(self): """ - We supply default arguments in MutationsBatcher.__init__, and in + We supply default arguments in MutationsBatcherAsync.__init__, and in table.mutations_batcher. 
Make sure any changes to defaults are applied to both places """ - from google.cloud.bigtable.client import Table - from google.cloud.bigtable.mutations_batcher import MutationsBatcher + from google.cloud.bigtable.data._async.client import TableAsync + from google.cloud.bigtable.data._async.mutations_batcher import ( + MutationsBatcherAsync, + ) import inspect get_batcher_signature = dict( - inspect.signature(Table.mutations_batcher).parameters + inspect.signature(TableAsync.mutations_batcher).parameters ) get_batcher_signature.pop("self") - batcher_init_signature = dict(inspect.signature(MutationsBatcher).parameters) + batcher_init_signature = dict( + inspect.signature(MutationsBatcherAsync).parameters + ) batcher_init_signature.pop("table") # both should have same number of arguments assert len(get_batcher_signature.keys()) == len(batcher_init_signature.keys()) @@ -446,7 +454,7 @@ def test_default_argument_consistency(self): ) @mock.patch( - "google.cloud.bigtable.mutations_batcher.MutationsBatcher._schedule_flush" + "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._schedule_flush" ) @pytest.mark.asyncio async def test__start_flush_timer_w_None(self, flush_mock): @@ -458,7 +466,7 @@ async def test__start_flush_timer_w_None(self, flush_mock): assert flush_mock.call_count == 0 @mock.patch( - "google.cloud.bigtable.mutations_batcher.MutationsBatcher._schedule_flush" + "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._schedule_flush" ) @pytest.mark.asyncio async def test__start_flush_timer_call_when_closed(self, flush_mock): @@ -472,7 +480,7 @@ async def test__start_flush_timer_call_when_closed(self, flush_mock): assert flush_mock.call_count == 0 @mock.patch( - "google.cloud.bigtable.mutations_batcher.MutationsBatcher._schedule_flush" + "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._schedule_flush" ) @pytest.mark.asyncio async def test__flush_timer(self, flush_mock): @@ -492,7 +500,7 @@ async def test__flush_timer(self, flush_mock): assert flush_mock.call_count == loop_num @mock.patch( - "google.cloud.bigtable.mutations_batcher.MutationsBatcher._schedule_flush" + "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._schedule_flush" ) @pytest.mark.asyncio async def test__flush_timer_no_mutations(self, flush_mock): @@ -511,7 +519,7 @@ async def test__flush_timer_no_mutations(self, flush_mock): assert flush_mock.call_count == 0 @mock.patch( - "google.cloud.bigtable.mutations_batcher.MutationsBatcher._schedule_flush" + "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._schedule_flush" ) @pytest.mark.asyncio async def test__flush_timer_close(self, flush_mock): @@ -541,7 +549,7 @@ async def test_append_wrong_mutation(self): Mutation objects should raise an exception. Only support RowMutationEntry """ - from google.cloud.bigtable.mutations import DeleteAllFromRow + from google.cloud.bigtable.data.mutations import DeleteAllFromRow async with self._make_one() as instance: expected_error = "invalid mutation type: DeleteAllFromRow. 
Only RowMutationEntry objects are supported by batcher" @@ -577,9 +585,13 @@ async def test_append_flush_runs_after_limit_hit(self): If the user appends a bunch of entries above the flush limits back-to-back, it should still flush in a single task """ - from google.cloud.bigtable.mutations_batcher import MutationsBatcher + from google.cloud.bigtable.data._async.mutations_batcher import ( + MutationsBatcherAsync, + ) - with mock.patch.object(MutationsBatcher, "_execute_mutate_rows") as op_mock: + with mock.patch.object( + MutationsBatcherAsync, "_execute_mutate_rows" + ) as op_mock: async with self._make_one(flush_limit_bytes=100) as instance: # mock network calls async def mock_call(*args, **kwargs): @@ -789,7 +801,7 @@ async def test__flush_internal_with_errors( """ errors returned from _execute_mutate_rows should be added to internal exceptions """ - from google.cloud.bigtable import exceptions + from google.cloud.bigtable.data import exceptions num_entries = 10 expected_errors = [ @@ -861,7 +873,7 @@ async def test_timer_flush_end_to_end(self): @pytest.mark.asyncio @mock.patch( - "google.cloud.bigtable.mutations_batcher._MutateRowsOperation", + "google.cloud.bigtable.data._async.mutations_batcher._MutateRowsOperationAsync", ) async def test__execute_mutate_rows(self, mutate_rows): mutate_rows.return_value = AsyncMock() @@ -884,10 +896,12 @@ async def test__execute_mutate_rows(self, mutate_rows): assert result == [] @pytest.mark.asyncio - @mock.patch("google.cloud.bigtable.mutations_batcher._MutateRowsOperation.start") + @mock.patch( + "google.cloud.bigtable.data._async.mutations_batcher._MutateRowsOperationAsync.start" + ) async def test__execute_mutate_rows_returns_errors(self, mutate_rows): """Errors from operation should be retruned as list""" - from google.cloud.bigtable.exceptions import ( + from google.cloud.bigtable.data.exceptions import ( MutationsExceptionGroup, FailedMutationEntryError, ) @@ -911,7 +925,7 @@ async def test__execute_mutate_rows_returns_errors(self, mutate_rows): @pytest.mark.asyncio async def test__raise_exceptions(self): """Raise exceptions and reset error state""" - from google.cloud.bigtable import exceptions + from google.cloud.bigtable.data import exceptions expected_total = 1201 expected_exceptions = [RuntimeError("mock")] * 3 @@ -958,7 +972,7 @@ async def test_close(self): @pytest.mark.asyncio async def test_close_w_exceptions(self): """Raise exceptions on close""" - from google.cloud.bigtable import exceptions + from google.cloud.bigtable.data import exceptions expected_total = 10 expected_exceptions = [RuntimeError("mock")] @@ -1001,20 +1015,14 @@ async def test_atexit_registration(self): """Should run _on_exit on program termination""" import atexit - with mock.patch( - "google.cloud.bigtable.mutations_batcher.MutationsBatcher._on_exit" - ) as on_exit_mock: + with mock.patch.object(atexit, "register") as register_mock: + assert register_mock.call_count == 0 async with self._make_one(): - assert on_exit_mock.call_count == 0 - atexit._run_exitfuncs() - assert on_exit_mock.call_count == 1 - # should not call after close - atexit._run_exitfuncs() - assert on_exit_mock.call_count == 1 + assert register_mock.call_count == 1 @pytest.mark.asyncio @mock.patch( - "google.cloud.bigtable.mutations_batcher._MutateRowsOperation", + "google.cloud.bigtable.data._async.mutations_batcher._MutateRowsOperationAsync", ) async def test_timeout_args_passed(self, mutate_rows): """ diff --git a/tests/unit/read-rows-acceptance-test.json 
b/tests/unit/data/read-rows-acceptance-test.json similarity index 100% rename from tests/unit/read-rows-acceptance-test.json rename to tests/unit/data/read-rows-acceptance-test.json diff --git a/tests/unit/test__helpers.py b/tests/unit/data/test__helpers.py similarity index 97% rename from tests/unit/test__helpers.py rename to tests/unit/data/test__helpers.py index 9aa1a7bb4..dc688bb0c 100644 --- a/tests/unit/test__helpers.py +++ b/tests/unit/data/test__helpers.py @@ -13,8 +13,8 @@ # import pytest -import google.cloud.bigtable._helpers as _helpers -import google.cloud.bigtable.exceptions as bigtable_exceptions +import google.cloud.bigtable.data._helpers as _helpers +import google.cloud.bigtable.data.exceptions as bigtable_exceptions import mock diff --git a/tests/unit/test__read_rows.py b/tests/unit/data/test__read_rows_state_machine.py similarity index 62% rename from tests/unit/test__read_rows.py rename to tests/unit/data/test__read_rows_state_machine.py index c893c56cd..0d1ee6b06 100644 --- a/tests/unit/test__read_rows.py +++ b/tests/unit/data/test__read_rows_state_machine.py @@ -1,10 +1,22 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import unittest import pytest -from google.cloud.bigtable.exceptions import InvalidChunk -from google.cloud.bigtable._read_rows import AWAITING_NEW_ROW -from google.cloud.bigtable._read_rows import AWAITING_NEW_CELL -from google.cloud.bigtable._read_rows import AWAITING_CELL_VALUE +from google.cloud.bigtable.data.exceptions import InvalidChunk +from google.cloud.bigtable.data._read_rows_state_machine import AWAITING_NEW_ROW +from google.cloud.bigtable.data._read_rows_state_machine import AWAITING_NEW_CELL +from google.cloud.bigtable.data._read_rows_state_machine import AWAITING_CELL_VALUE # try/except added for compatibility with python < 3.8 try: @@ -20,377 +32,10 @@ TEST_LABELS = ["label1", "label2"] -class TestReadRowsOperation: - """ - Tests helper functions in the ReadRowsOperation class - in-depth merging logic in merge_row_response_stream and _read_rows_retryable_attempt - is tested in test_read_rows_acceptance test_client_read_rows, and conformance tests - """ - - @staticmethod - def _get_target_class(): - from google.cloud.bigtable._read_rows import _ReadRowsOperation - - return _ReadRowsOperation - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_ctor_defaults(self): - request = {} - client = mock.Mock() - client.read_rows = mock.Mock() - client.read_rows.return_value = None - default_operation_timeout = 600 - time_gen_mock = mock.Mock() - with mock.patch( - "google.cloud.bigtable._read_rows._attempt_timeout_generator", time_gen_mock - ): - instance = self._make_one(request, client) - assert time_gen_mock.call_count == 1 - time_gen_mock.assert_called_once_with(None, default_operation_timeout) - assert instance.transient_errors == [] - assert instance._last_emitted_row_key is None - assert instance._emit_count == 0 - assert instance.operation_timeout == default_operation_timeout - 
retryable_fn = instance._partial_retryable - assert retryable_fn.func == instance._read_rows_retryable_attempt - assert retryable_fn.args[0] == client.read_rows - assert retryable_fn.args[1] == time_gen_mock.return_value - assert retryable_fn.args[2] == 0 - assert client.read_rows.call_count == 0 - - def test_ctor(self): - row_limit = 91 - request = {"rows_limit": row_limit} - client = mock.Mock() - client.read_rows = mock.Mock() - client.read_rows.return_value = None - expected_operation_timeout = 42 - expected_request_timeout = 44 - time_gen_mock = mock.Mock() - with mock.patch( - "google.cloud.bigtable._read_rows._attempt_timeout_generator", time_gen_mock - ): - instance = self._make_one( - request, - client, - operation_timeout=expected_operation_timeout, - per_request_timeout=expected_request_timeout, - ) - assert time_gen_mock.call_count == 1 - time_gen_mock.assert_called_once_with( - expected_request_timeout, expected_operation_timeout - ) - assert instance.transient_errors == [] - assert instance._last_emitted_row_key is None - assert instance._emit_count == 0 - assert instance.operation_timeout == expected_operation_timeout - retryable_fn = instance._partial_retryable - assert retryable_fn.func == instance._read_rows_retryable_attempt - assert retryable_fn.args[0] == client.read_rows - assert retryable_fn.args[1] == time_gen_mock.return_value - assert retryable_fn.args[2] == row_limit - assert client.read_rows.call_count == 0 - - def test___aiter__(self): - request = {} - client = mock.Mock() - client.read_rows = mock.Mock() - instance = self._make_one(request, client) - assert instance.__aiter__() is instance - - @pytest.mark.asyncio - async def test_transient_error_capture(self): - from google.api_core import exceptions as core_exceptions - - client = mock.Mock() - client.read_rows = mock.Mock() - test_exc = core_exceptions.Aborted("test") - test_exc2 = core_exceptions.DeadlineExceeded("test") - client.read_rows.side_effect = [test_exc, test_exc2] - instance = self._make_one({}, client) - with pytest.raises(RuntimeError): - await instance.__anext__() - assert len(instance.transient_errors) == 2 - assert instance.transient_errors[0] == test_exc - assert instance.transient_errors[1] == test_exc2 - - @pytest.mark.parametrize( - "in_keys,last_key,expected", - [ - (["b", "c", "d"], "a", ["b", "c", "d"]), - (["a", "b", "c"], "b", ["c"]), - (["a", "b", "c"], "c", []), - (["a", "b", "c"], "d", []), - (["d", "c", "b", "a"], "b", ["d", "c"]), - ], - ) - def test_revise_request_rowset_keys(self, in_keys, last_key, expected): - sample_range = {"start_key_open": last_key} - row_set = {"row_keys": in_keys, "row_ranges": [sample_range]} - revised = self._get_target_class()._revise_request_rowset(row_set, last_key) - assert revised["row_keys"] == expected - assert revised["row_ranges"] == [sample_range] - - @pytest.mark.parametrize( - "in_ranges,last_key,expected", - [ - ( - [{"start_key_open": "b", "end_key_closed": "d"}], - "a", - [{"start_key_open": "b", "end_key_closed": "d"}], - ), - ( - [{"start_key_closed": "b", "end_key_closed": "d"}], - "a", - [{"start_key_closed": "b", "end_key_closed": "d"}], - ), - ( - [{"start_key_open": "a", "end_key_closed": "d"}], - "b", - [{"start_key_open": "b", "end_key_closed": "d"}], - ), - ( - [{"start_key_closed": "a", "end_key_open": "d"}], - "b", - [{"start_key_open": "b", "end_key_open": "d"}], - ), - ( - [{"start_key_closed": "b", "end_key_closed": "d"}], - "b", - [{"start_key_open": "b", "end_key_closed": "d"}], - ), - ([{"start_key_closed": "b", 
"end_key_closed": "d"}], "d", []), - ([{"start_key_closed": "b", "end_key_open": "d"}], "d", []), - ([{"start_key_closed": "b", "end_key_closed": "d"}], "e", []), - ([{"start_key_closed": "b"}], "z", [{"start_key_open": "z"}]), - ([{"start_key_closed": "b"}], "a", [{"start_key_closed": "b"}]), - ( - [{"end_key_closed": "z"}], - "a", - [{"start_key_open": "a", "end_key_closed": "z"}], - ), - ( - [{"end_key_open": "z"}], - "a", - [{"start_key_open": "a", "end_key_open": "z"}], - ), - ], - ) - def test_revise_request_rowset_ranges(self, in_ranges, last_key, expected): - next_key = last_key + "a" - row_set = {"row_keys": [next_key], "row_ranges": in_ranges} - revised = self._get_target_class()._revise_request_rowset(row_set, last_key) - assert revised["row_keys"] == [next_key] - assert revised["row_ranges"] == expected - - @pytest.mark.parametrize("last_key", ["a", "b", "c"]) - def test_revise_request_full_table(self, last_key): - row_set = {"row_keys": [], "row_ranges": []} - for selected_set in [row_set, None]: - revised = self._get_target_class()._revise_request_rowset( - selected_set, last_key - ) - assert revised["row_keys"] == [] - assert len(revised["row_ranges"]) == 1 - assert revised["row_ranges"][0]["start_key_open"] == last_key - - def test_revise_to_empty_rowset(self): - """revising to an empty rowset should raise error""" - from google.cloud.bigtable.exceptions import _RowSetComplete - - row_keys = ["a", "b", "c"] - row_set = {"row_keys": row_keys, "row_ranges": [{"end_key_open": "c"}]} - with pytest.raises(_RowSetComplete): - self._get_target_class()._revise_request_rowset(row_set, "d") - - @pytest.mark.parametrize( - "start_limit,emit_num,expected_limit", - [ - (10, 0, 10), - (10, 1, 9), - (10, 10, 0), - (0, 10, 0), - (0, 0, 0), - (4, 2, 2), - ], - ) - @pytest.mark.asyncio - async def test_revise_limit(self, start_limit, emit_num, expected_limit): - """ - revise_limit should revise the request's limit field - - if limit is 0 (unlimited), it should never be revised - - if start_limit-emit_num == 0, the request should end early - - if the number emitted exceeds the new limit, an exception should - should be raised (tested in test_revise_limit_over_limit) - """ - import itertools - - request = {"rows_limit": start_limit} - instance = self._make_one(request, mock.Mock()) - instance._emit_count = emit_num - instance._last_emitted_row_key = "a" - gapic_mock = mock.Mock() - gapic_mock.side_effect = [GeneratorExit("stop_fn")] - mock_timeout_gen = itertools.repeat(5) - - attempt = instance._read_rows_retryable_attempt( - gapic_mock, mock_timeout_gen, start_limit - ) - if start_limit != 0 and expected_limit == 0: - # if we emitted the expected number of rows, we should receive a StopAsyncIteration - with pytest.raises(StopAsyncIteration): - await attempt.__anext__() - else: - with pytest.raises(GeneratorExit): - await attempt.__anext__() - assert request["rows_limit"] == expected_limit - - @pytest.mark.parametrize("start_limit,emit_num", [(5, 10), (3, 9), (1, 10)]) - @pytest.mark.asyncio - async def test_revise_limit_over_limit(self, start_limit, emit_num): - """ - Should raise runtime error if we get in state where emit_num > start_num - (unless start_num == 0, which represents unlimited) - """ - import itertools - - request = {"rows_limit": start_limit} - instance = self._make_one(request, mock.Mock()) - instance._emit_count = emit_num - instance._last_emitted_row_key = "a" - mock_timeout_gen = itertools.repeat(5) - attempt = instance._read_rows_retryable_attempt( - mock.Mock(), 
mock_timeout_gen, start_limit - ) - with pytest.raises(RuntimeError) as e: - await attempt.__anext__() - assert "emit count exceeds row limit" in str(e.value) - - @pytest.mark.asyncio - async def test_aclose(self): - import asyncio - - instance = self._make_one({}, mock.Mock()) - await instance.aclose() - assert instance._stream is None - assert instance._last_emitted_row_key is None - with pytest.raises(asyncio.InvalidStateError): - await instance.__anext__() - # try calling a second time - await instance.aclose() - - @pytest.mark.parametrize("limit", [1, 3, 10]) - @pytest.mark.asyncio - async def test_retryable_attempt_hit_limit(self, limit): - """ - Stream should end after hitting the limit - """ - from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse - import itertools - - instance = self._make_one({}, mock.Mock()) - - async def mock_gapic(*args, **kwargs): - # continuously return a single row - async def gen(): - for i in range(limit * 2): - chunk = ReadRowsResponse.CellChunk( - row_key=str(i).encode(), - family_name="family_name", - qualifier=b"qualifier", - commit_row=True, - ) - yield ReadRowsResponse(chunks=[chunk]) - - return gen() - - mock_timeout_gen = itertools.repeat(5) - gen = instance._read_rows_retryable_attempt(mock_gapic, mock_timeout_gen, limit) - # should yield values up to the limit - for i in range(limit): - await gen.__anext__() - # next value should be StopAsyncIteration - with pytest.raises(StopAsyncIteration): - await gen.__anext__() - - @pytest.mark.asyncio - async def test_retryable_ignore_repeated_rows(self): - """ - Duplicate rows should cause an invalid chunk error - """ - from google.cloud.bigtable._read_rows import _ReadRowsOperation - from google.cloud.bigtable.row import Row - from google.cloud.bigtable.exceptions import InvalidChunk - - async def mock_stream(): - while True: - yield Row(b"dup_key", cells=[]) - yield Row(b"dup_key", cells=[]) - - with mock.patch.object( - _ReadRowsOperation, "merge_row_response_stream" - ) as mock_stream_fn: - mock_stream_fn.return_value = mock_stream() - instance = self._make_one({}, mock.AsyncMock()) - first_row = await instance.__anext__() - assert first_row.row_key == b"dup_key" - with pytest.raises(InvalidChunk) as exc: - await instance.__anext__() - assert "Last emitted row key out of order" in str(exc.value) - - @pytest.mark.asyncio - async def test_retryable_ignore_last_scanned_rows(self): - """ - Last scanned rows should not be emitted - """ - from google.cloud.bigtable._read_rows import _ReadRowsOperation - from google.cloud.bigtable.row import Row, _LastScannedRow - - async def mock_stream(): - while True: - yield Row(b"key1", cells=[]) - yield _LastScannedRow(b"key2_ignored") - yield Row(b"key3", cells=[]) - - with mock.patch.object( - _ReadRowsOperation, "merge_row_response_stream" - ) as mock_stream_fn: - mock_stream_fn.return_value = mock_stream() - instance = self._make_one({}, mock.AsyncMock()) - first_row = await instance.__anext__() - assert first_row.row_key == b"key1" - second_row = await instance.__anext__() - assert second_row.row_key == b"key3" - - @pytest.mark.asyncio - async def test_retryable_cancel_on_close(self): - """Underlying gapic call should be cancelled when stream is closed""" - from google.cloud.bigtable._read_rows import _ReadRowsOperation - from google.cloud.bigtable.row import Row - - async def mock_stream(): - while True: - yield Row(b"key1", cells=[]) - - with mock.patch.object( - _ReadRowsOperation, "merge_row_response_stream" - ) as mock_stream_fn: - 
mock_stream_fn.return_value = mock_stream() - mock_gapic = mock.AsyncMock() - mock_call = await mock_gapic.read_rows() - instance = self._make_one({}, mock_gapic) - await instance.__anext__() - assert mock_call.cancel.call_count == 0 - await instance.aclose() - assert mock_call.cancel.call_count == 1 - - class TestStateMachine(unittest.TestCase): @staticmethod def _get_target_class(): - from google.cloud.bigtable._read_rows import _StateMachine + from google.cloud.bigtable.data._read_rows_state_machine import _StateMachine return _StateMachine @@ -398,7 +43,7 @@ def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_ctor(self): - from google.cloud.bigtable._read_rows import _RowBuilder + from google.cloud.bigtable.data._read_rows_state_machine import _RowBuilder instance = self._make_one() assert instance.last_seen_row_key is None @@ -435,7 +80,7 @@ def test__reset_row(self): assert instance.adapter.reset.call_count == 1 def test_handle_last_scanned_row_wrong_state(self): - from google.cloud.bigtable.exceptions import InvalidChunk + from google.cloud.bigtable.data.exceptions import InvalidChunk instance = self._make_one() instance.current_state = AWAITING_NEW_CELL @@ -448,7 +93,7 @@ def test_handle_last_scanned_row_wrong_state(self): assert e.value.args[0] == "Last scanned row key received in invalid state" def test_handle_last_scanned_row_out_of_order(self): - from google.cloud.bigtable.exceptions import InvalidChunk + from google.cloud.bigtable.data.exceptions import InvalidChunk instance = self._make_one() instance.last_seen_row_key = b"b" @@ -460,7 +105,7 @@ def test_handle_last_scanned_row_out_of_order(self): assert e.value.args[0] == "Last scanned row key is out of order" def test_handle_last_scanned_row(self): - from google.cloud.bigtable.row import _LastScannedRow + from google.cloud.bigtable.data.row import _LastScannedRow instance = self._make_one() instance.adapter = mock.Mock() @@ -475,7 +120,7 @@ def test_handle_last_scanned_row(self): assert instance.adapter.reset.call_count == 1 def test__handle_complete_row(self): - from google.cloud.bigtable.row import Row + from google.cloud.bigtable.data.row import Row instance = self._make_one() instance.current_state = mock.Mock() @@ -490,7 +135,7 @@ def test__handle_complete_row(self): assert instance.adapter.reset.call_count == 1 def test__handle_reset_chunk_errors(self): - from google.cloud.bigtable.exceptions import InvalidChunk + from google.cloud.bigtable.data.exceptions import InvalidChunk from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse instance = self._make_one() @@ -528,7 +173,7 @@ def test__handle_reset_chunk_errors(self): assert e.value.args[0] == "Reset chunk has labels" def test_handle_chunk_out_of_order(self): - from google.cloud.bigtable.exceptions import InvalidChunk + from google.cloud.bigtable.data.exceptions import InvalidChunk from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse instance = self._make_one() @@ -570,7 +215,7 @@ def handle_chunk_with_commit_wrong_state(self, state): def test_handle_chunk_with_commit(self): from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse - from google.cloud.bigtable.row import Row + from google.cloud.bigtable.data.row import Row instance = self._make_one() with mock.patch.object(type(instance), "_reset_row") as mock_reset: @@ -587,7 +232,7 @@ def test_handle_chunk_with_commit(self): def test_handle_chunk_with_commit_empty_strings(self): from google.cloud.bigtable_v2.types.bigtable import 
ReadRowsResponse - from google.cloud.bigtable.row import Row + from google.cloud.bigtable.data.row import Row instance = self._make_one() with mock.patch.object(type(instance), "_reset_row") as mock_reset: @@ -648,7 +293,7 @@ def test_AWAITING_NEW_ROW(self): def test_AWAITING_NEW_CELL_family_without_qualifier(self): from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse - from google.cloud.bigtable._read_rows import _StateMachine + from google.cloud.bigtable.data._async._read_rows import _StateMachine state_machine = _StateMachine() state_machine.current_qualifier = b"q" @@ -660,7 +305,7 @@ def test_AWAITING_NEW_CELL_family_without_qualifier(self): def test_AWAITING_NEW_CELL_qualifier_without_family(self): from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse - from google.cloud.bigtable._read_rows import _StateMachine + from google.cloud.bigtable.data._async._read_rows import _StateMachine state_machine = _StateMachine() instance = AWAITING_NEW_CELL @@ -671,7 +316,7 @@ def test_AWAITING_NEW_CELL_qualifier_without_family(self): def test_AWAITING_NEW_CELL_no_row_state(self): from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse - from google.cloud.bigtable._read_rows import _StateMachine + from google.cloud.bigtable.data._async._read_rows import _StateMachine state_machine = _StateMachine() instance = AWAITING_NEW_CELL @@ -687,7 +332,7 @@ def test_AWAITING_NEW_CELL_no_row_state(self): def test_AWAITING_NEW_CELL_invalid_row_key(self): from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse - from google.cloud.bigtable._read_rows import _StateMachine + from google.cloud.bigtable.data._async._read_rows import _StateMachine state_machine = _StateMachine() instance = AWAITING_NEW_CELL @@ -699,7 +344,7 @@ def test_AWAITING_NEW_CELL_invalid_row_key(self): def test_AWAITING_NEW_CELL_success_no_split(self): from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse - from google.cloud.bigtable._read_rows import _StateMachine + from google.cloud.bigtable.data._async._read_rows import _StateMachine state_machine = _StateMachine() state_machine.adapter = mock.Mock() @@ -733,7 +378,7 @@ def test_AWAITING_NEW_CELL_success_no_split(self): def test_AWAITING_NEW_CELL_success_with_split(self): from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse - from google.cloud.bigtable._read_rows import _StateMachine + from google.cloud.bigtable.data._async._read_rows import _StateMachine state_machine = _StateMachine() state_machine.adapter = mock.Mock() @@ -768,7 +413,7 @@ def test_AWAITING_NEW_CELL_success_with_split(self): def test_AWAITING_CELL_VALUE_w_row_key(self): from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse - from google.cloud.bigtable._read_rows import _StateMachine + from google.cloud.bigtable.data._async._read_rows import _StateMachine state_machine = _StateMachine() instance = AWAITING_CELL_VALUE @@ -779,7 +424,7 @@ def test_AWAITING_CELL_VALUE_w_row_key(self): def test_AWAITING_CELL_VALUE_w_family(self): from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse - from google.cloud.bigtable._read_rows import _StateMachine + from google.cloud.bigtable.data._async._read_rows import _StateMachine state_machine = _StateMachine() instance = AWAITING_CELL_VALUE @@ -790,7 +435,7 @@ def test_AWAITING_CELL_VALUE_w_family(self): def test_AWAITING_CELL_VALUE_w_qualifier(self): from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse - from google.cloud.bigtable._read_rows import 
_StateMachine + from google.cloud.bigtable.data._async._read_rows import _StateMachine state_machine = _StateMachine() instance = AWAITING_CELL_VALUE @@ -801,7 +446,7 @@ def test_AWAITING_CELL_VALUE_w_qualifier(self): def test_AWAITING_CELL_VALUE_w_timestamp(self): from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse - from google.cloud.bigtable._read_rows import _StateMachine + from google.cloud.bigtable.data._async._read_rows import _StateMachine state_machine = _StateMachine() instance = AWAITING_CELL_VALUE @@ -812,7 +457,7 @@ def test_AWAITING_CELL_VALUE_w_timestamp(self): def test_AWAITING_CELL_VALUE_w_labels(self): from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse - from google.cloud.bigtable._read_rows import _StateMachine + from google.cloud.bigtable.data._async._read_rows import _StateMachine state_machine = _StateMachine() instance = AWAITING_CELL_VALUE @@ -823,7 +468,7 @@ def test_AWAITING_CELL_VALUE_w_labels(self): def test_AWAITING_CELL_VALUE_continuation(self): from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse - from google.cloud.bigtable._read_rows import _StateMachine + from google.cloud.bigtable.data._async._read_rows import _StateMachine state_machine = _StateMachine() state_machine.adapter = mock.Mock() @@ -838,7 +483,7 @@ def test_AWAITING_CELL_VALUE_continuation(self): def test_AWAITING_CELL_VALUE_final_chunk(self): from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse - from google.cloud.bigtable._read_rows import _StateMachine + from google.cloud.bigtable.data._async._read_rows import _StateMachine state_machine = _StateMachine() state_machine.adapter = mock.Mock() @@ -855,7 +500,7 @@ def test_AWAITING_CELL_VALUE_final_chunk(self): class TestRowBuilder(unittest.TestCase): @staticmethod def _get_target_class(): - from google.cloud.bigtable._read_rows import _RowBuilder + from google.cloud.bigtable.data._read_rows_state_machine import _RowBuilder return _RowBuilder @@ -1003,7 +648,7 @@ def test_reset(self): class TestChunkHasField: def test__chunk_has_field_empty(self): from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse - from google.cloud.bigtable._read_rows import _chunk_has_field + from google.cloud.bigtable.data._read_rows_state_machine import _chunk_has_field chunk = ReadRowsResponse.CellChunk()._pb assert not _chunk_has_field(chunk, "family_name") @@ -1011,7 +656,7 @@ def test__chunk_has_field_empty(self): def test__chunk_has_field_populated_empty_strings(self): from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse - from google.cloud.bigtable._read_rows import _chunk_has_field + from google.cloud.bigtable.data._read_rows_state_machine import _chunk_has_field chunk = ReadRowsResponse.CellChunk(qualifier=b"", family_name="")._pb assert _chunk_has_field(chunk, "family_name") diff --git a/tests/unit/test_exceptions.py b/tests/unit/data/test_exceptions.py similarity index 95% rename from tests/unit/test_exceptions.py rename to tests/unit/data/test_exceptions.py index ef186a47c..9d1145e36 100644 --- a/tests/unit/test_exceptions.py +++ b/tests/unit/data/test_exceptions.py @@ -16,7 +16,7 @@ import pytest import sys -import google.cloud.bigtable.exceptions as bigtable_exceptions +import google.cloud.bigtable.data.exceptions as bigtable_exceptions # try/except added for compatibility with python < 3.8 try: @@ -31,9 +31,9 @@ class TestBigtableExceptionGroup: """ def _get_class(self): - from google.cloud.bigtable.exceptions import BigtableExceptionGroup + from 
google.cloud.bigtable.data.exceptions import _BigtableExceptionGroup - return BigtableExceptionGroup + return _BigtableExceptionGroup def _make_one(self, message="test_message", excs=None): if excs is None: @@ -74,7 +74,7 @@ def test_311_traceback(self): exc_group = self._make_one(excs=[sub_exc1, sub_exc2]) expected_traceback = ( - f" | google.cloud.bigtable.exceptions.{type(exc_group).__name__}: {str(exc_group)}", + f" | google.cloud.bigtable.data.exceptions.{type(exc_group).__name__}: {str(exc_group)}", " +-+---------------- 1 ----------------", " | RuntimeError: first sub exception", " +---------------- 2 ----------------", @@ -123,7 +123,7 @@ def test_exception_handling(self): class TestMutationsExceptionGroup(TestBigtableExceptionGroup): def _get_class(self): - from google.cloud.bigtable.exceptions import MutationsExceptionGroup + from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup return MutationsExceptionGroup @@ -228,7 +228,7 @@ def test_from_truncated_lists( class TestRetryExceptionGroup(TestBigtableExceptionGroup): def _get_class(self): - from google.cloud.bigtable.exceptions import RetryExceptionGroup + from google.cloud.bigtable.data.exceptions import RetryExceptionGroup return RetryExceptionGroup @@ -269,7 +269,7 @@ def test_raise(self, exception_list, expected_message): class TestShardedReadRowsExceptionGroup(TestBigtableExceptionGroup): def _get_class(self): - from google.cloud.bigtable.exceptions import ShardedReadRowsExceptionGroup + from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup return ShardedReadRowsExceptionGroup @@ -306,7 +306,7 @@ def test_raise(self, exception_list, succeeded, total_entries, expected_message) class TestFailedMutationEntryError: def _get_class(self): - from google.cloud.bigtable.exceptions import FailedMutationEntryError + from google.cloud.bigtable.data.exceptions import FailedMutationEntryError return FailedMutationEntryError @@ -374,7 +374,7 @@ def test_no_index(self): class TestFailedQueryShardError: def _get_class(self): - from google.cloud.bigtable.exceptions import FailedQueryShardError + from google.cloud.bigtable.data.exceptions import FailedQueryShardError return FailedQueryShardError diff --git a/tests/unit/test_mutations.py b/tests/unit/data/test_mutations.py similarity index 97% rename from tests/unit/test_mutations.py rename to tests/unit/data/test_mutations.py index c8c6788b1..8365dbd02 100644 --- a/tests/unit/test_mutations.py +++ b/tests/unit/data/test_mutations.py @@ -14,7 +14,7 @@ import pytest -import google.cloud.bigtable.mutations as mutations +import google.cloud.bigtable.data.mutations as mutations # try/except added for compatibility with python < 3.8 try: @@ -25,7 +25,7 @@ class TestBaseMutation: def _target_class(self): - from google.cloud.bigtable.mutations import Mutation + from google.cloud.bigtable.data.mutations import Mutation return Mutation @@ -173,7 +173,7 @@ def test__from_dict_wrong_subclass(self): class TestSetCell: def _target_class(self): - from google.cloud.bigtable.mutations import SetCell + from google.cloud.bigtable.data.mutations import SetCell return SetCell @@ -336,7 +336,7 @@ def test___str__(self): class TestDeleteRangeFromColumn: def _target_class(self): - from google.cloud.bigtable.mutations import DeleteRangeFromColumn + from google.cloud.bigtable.data.mutations import DeleteRangeFromColumn return DeleteRangeFromColumn @@ -423,7 +423,7 @@ def test___str__(self): class TestDeleteAllFromFamily: def _target_class(self): - from 
google.cloud.bigtable.mutations import DeleteAllFromFamily + from google.cloud.bigtable.data.mutations import DeleteAllFromFamily return DeleteAllFromFamily @@ -460,7 +460,7 @@ def test___str__(self): class TestDeleteFromRow: def _target_class(self): - from google.cloud.bigtable.mutations import DeleteAllFromRow + from google.cloud.bigtable.data.mutations import DeleteAllFromRow return DeleteAllFromRow @@ -490,7 +490,7 @@ def test___str__(self): class TestRowMutationEntry: def _target_class(self): - from google.cloud.bigtable.mutations import RowMutationEntry + from google.cloud.bigtable.data.mutations import RowMutationEntry return RowMutationEntry @@ -506,7 +506,7 @@ def test_ctor(self): def test_ctor_over_limit(self): """Should raise error if mutations exceed MAX_MUTATIONS_PER_ENTRY""" - from google.cloud.bigtable._mutate_rows import ( + from google.cloud.bigtable.data.mutations import ( MUTATE_ROWS_REQUEST_MUTATION_LIMIT, ) @@ -527,7 +527,7 @@ def test_ctor_str_key(self): assert list(instance.mutations) == expected_mutations def test_ctor_single_mutation(self): - from google.cloud.bigtable.mutations import DeleteAllFromRow + from google.cloud.bigtable.data.mutations import DeleteAllFromRow expected_key = b"row_key" expected_mutations = DeleteAllFromRow() diff --git a/tests/unit/test_read_modify_write_rules.py b/tests/unit/data/test_read_modify_write_rules.py similarity index 94% rename from tests/unit/test_read_modify_write_rules.py rename to tests/unit/data/test_read_modify_write_rules.py index 02240df6d..aeb41f19c 100644 --- a/tests/unit/test_read_modify_write_rules.py +++ b/tests/unit/data/test_read_modify_write_rules.py @@ -24,7 +24,9 @@ class TestBaseReadModifyWriteRule: def _target_class(self): - from google.cloud.bigtable.read_modify_write_rules import ReadModifyWriteRule + from google.cloud.bigtable.data.read_modify_write_rules import ( + ReadModifyWriteRule, + ) return ReadModifyWriteRule @@ -40,7 +42,7 @@ def test__to_dict(self): class TestIncrementRule: def _target_class(self): - from google.cloud.bigtable.read_modify_write_rules import IncrementRule + from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule return IncrementRule @@ -98,7 +100,7 @@ def test__to_dict(self, args, expected): class TestAppendValueRule: def _target_class(self): - from google.cloud.bigtable.read_modify_write_rules import AppendValueRule + from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule return AppendValueRule diff --git a/tests/unit/test_read_rows_acceptance.py b/tests/unit/data/test_read_rows_acceptance.py similarity index 93% rename from tests/unit/test_read_rows_acceptance.py rename to tests/unit/data/test_read_rows_acceptance.py index 2349d25c6..804e4e0fb 100644 --- a/tests/unit/test_read_rows_acceptance.py +++ b/tests/unit/data/test_read_rows_acceptance.py @@ -21,12 +21,13 @@ from google.cloud.bigtable_v2 import ReadRowsResponse -from google.cloud.bigtable.client import BigtableDataClient -from google.cloud.bigtable.exceptions import InvalidChunk -from google.cloud.bigtable._read_rows import _ReadRowsOperation, _StateMachine -from google.cloud.bigtable.row import Row +from google.cloud.bigtable.data._async.client import BigtableDataClientAsync +from google.cloud.bigtable.data.exceptions import InvalidChunk +from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync +from google.cloud.bigtable.data._read_rows_state_machine import _StateMachine +from google.cloud.bigtable.data.row import Row -from .v2_client.test_row_merger 
import ReadRowsTest, TestFile +from ..v2_client.test_row_merger import ReadRowsTest, TestFile def parse_readrows_acceptance_tests(): @@ -67,7 +68,7 @@ async def _scenerio_stream(): try: state = _StateMachine() results = [] - async for row in _ReadRowsOperation.merge_row_response_stream( + async for row in _ReadRowsOperationAsync.merge_row_response_stream( _scenerio_stream(), state ): for cell in row: @@ -117,7 +118,7 @@ def cancel(self): return mock_stream(chunk_list) try: - client = BigtableDataClient() + client = BigtableDataClientAsync() table = client.get_table("instance", "table") results = [] with mock.patch.object(table.client._gapic_client, "read_rows") as read_rows: @@ -150,7 +151,7 @@ async def _row_stream(): state = _StateMachine() state.last_seen_row_key = b"a" with pytest.raises(InvalidChunk): - async for _ in _ReadRowsOperation.merge_row_response_stream( + async for _ in _ReadRowsOperationAsync.merge_row_response_stream( _row_stream(), state ): pass @@ -309,6 +310,8 @@ async def _row_stream(): state = _StateMachine() results = [] - async for row in _ReadRowsOperation.merge_row_response_stream(_row_stream(), state): + async for row in _ReadRowsOperationAsync.merge_row_response_stream( + _row_stream(), state + ): results.append(row) return results diff --git a/tests/unit/test_read_rows_query.py b/tests/unit/data/test_read_rows_query.py similarity index 94% rename from tests/unit/test_read_rows_query.py rename to tests/unit/data/test_read_rows_query.py index 7ecd91f8c..88fde2d24 100644 --- a/tests/unit/test_read_rows_query.py +++ b/tests/unit/data/test_read_rows_query.py @@ -23,7 +23,7 @@ class TestRowRange: @staticmethod def _get_target_class(): - from google.cloud.bigtable.read_rows_query import RowRange + from google.cloud.bigtable.data.read_rows_query import RowRange return RowRange @@ -139,7 +139,7 @@ def test__from_dict( start_is_inclusive, end_is_inclusive, ): - from google.cloud.bigtable.read_rows_query import RowRange + from google.cloud.bigtable.data.read_rows_query import RowRange row_range = RowRange._from_dict(input_dict) assert row_range._to_dict().keys() == input_dict.keys() @@ -172,7 +172,7 @@ def test__from_dict( ], ) def test__from_points(self, dict_repr): - from google.cloud.bigtable.read_rows_query import RowRange + from google.cloud.bigtable.data.read_rows_query import RowRange row_range_from_dict = RowRange._from_dict(dict_repr) row_range_from_points = RowRange._from_points( @@ -210,7 +210,7 @@ def test__from_points(self, dict_repr): ], ) def test___hash__(self, first_dict, second_dict, should_match): - from google.cloud.bigtable.read_rows_query import RowRange + from google.cloud.bigtable.data.read_rows_query import RowRange row_range1 = RowRange._from_dict(first_dict) row_range2 = RowRange._from_dict(second_dict) @@ -233,7 +233,7 @@ def test___bool__(self, dict_repr, expected): """ Only row range with both points empty should be falsy """ - from google.cloud.bigtable.read_rows_query import RowRange + from google.cloud.bigtable.data.read_rows_query import RowRange row_range = RowRange._from_dict(dict_repr) assert bool(row_range) is expected @@ -242,7 +242,7 @@ def test___bool__(self, dict_repr, expected): class TestReadRowsQuery: @staticmethod def _get_target_class(): - from google.cloud.bigtable.read_rows_query import ReadRowsQuery + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery return ReadRowsQuery @@ -257,8 +257,8 @@ def test_ctor_defaults(self): assert query.limit is None def test_ctor_explicit(self): - from 
google.cloud.bigtable.row_filters import RowFilterChain - from google.cloud.bigtable.read_rows_query import RowRange + from google.cloud.bigtable.data.row_filters import RowFilterChain + from google.cloud.bigtable.data.read_rows_query import RowRange filter_ = RowFilterChain() query = self._make_one( @@ -281,7 +281,7 @@ def test_ctor_invalid_limit(self): assert str(exc.value) == "limit must be >= 0" def test_set_filter(self): - from google.cloud.bigtable.row_filters import RowFilterChain + from google.cloud.bigtable.data.row_filters import RowFilterChain filter1 = RowFilterChain() query = self._make_one() @@ -300,7 +300,7 @@ def test_set_filter(self): assert str(exc.value) == "row_filter must be a RowFilter or dict" def test_set_filter_dict(self): - from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter from google.cloud.bigtable_v2.types.bigtable import ReadRowsRequest filter1 = RowSampleFilter(0.5) @@ -402,7 +402,7 @@ def test_duplicate_rows(self): assert len(query.row_keys) == 3 def test_add_range(self): - from google.cloud.bigtable.read_rows_query import RowRange + from google.cloud.bigtable.data.read_rows_query import RowRange query = self._make_one() assert query.row_ranges == set() @@ -419,7 +419,7 @@ def test_add_range(self): assert len(query.row_ranges) == 2 def test_add_range_dict(self): - from google.cloud.bigtable.read_rows_query import RowRange + from google.cloud.bigtable.data.read_rows_query import RowRange query = self._make_one() assert query.row_ranges == set() @@ -449,8 +449,8 @@ def test_to_dict_rows_default(self): def test_to_dict_rows_populated(self): # dictionary should be in rowset proto format from google.cloud.bigtable_v2.types.bigtable import ReadRowsRequest - from google.cloud.bigtable.row_filters import PassAllFilter - from google.cloud.bigtable.read_rows_query import RowRange + from google.cloud.bigtable.data.row_filters import PassAllFilter + from google.cloud.bigtable.data.read_rows_query import RowRange row_filter = PassAllFilter(False) query = self._make_one(limit=100, row_filter=row_filter) @@ -494,7 +494,7 @@ def test_to_dict_rows_populated(self): assert filter_proto == row_filter._to_pb() def _parse_query_string(self, query_string): - from google.cloud.bigtable.read_rows_query import ReadRowsQuery, RowRange + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery, RowRange query = ReadRowsQuery() segments = query_string.split(",") @@ -550,7 +550,7 @@ def test_shard_full_table_scan_empty_split(self): """ Sharding a full table scan with no split should return another full table scan. 
""" - from google.cloud.bigtable.read_rows_query import ReadRowsQuery + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery full_scan_query = ReadRowsQuery() split_points = [] @@ -563,7 +563,7 @@ def test_shard_full_table_scan_with_split(self): """ Test splitting a full table scan into two queries """ - from google.cloud.bigtable.read_rows_query import ReadRowsQuery + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery full_scan_query = ReadRowsQuery() split_points = [(b"a", None)] @@ -576,7 +576,7 @@ def test_shard_full_table_scan_with_multiple_split(self): """ Test splitting a full table scan into three queries """ - from google.cloud.bigtable.read_rows_query import ReadRowsQuery + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery full_scan_query = ReadRowsQuery() split_points = [(b"a", None), (b"z", None)] @@ -684,7 +684,7 @@ def test_shard_limit_exception(self): """ queries with a limit should raise an exception when a shard is attempted """ - from google.cloud.bigtable.read_rows_query import ReadRowsQuery + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery query = ReadRowsQuery(limit=10) with pytest.raises(AttributeError) as e: @@ -718,8 +718,8 @@ def test_shard_limit_exception(self): ], ) def test___eq__(self, first_args, second_args, expected): - from google.cloud.bigtable.read_rows_query import ReadRowsQuery - from google.cloud.bigtable.read_rows_query import RowRange + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + from google.cloud.bigtable.data.read_rows_query import RowRange # replace row_range placeholders with a RowRange object if len(first_args) > 1: @@ -733,7 +733,7 @@ def test___eq__(self, first_args, second_args, expected): assert (first == second) == expected def test___repr__(self): - from google.cloud.bigtable.read_rows_query import ReadRowsQuery + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery instance = self._make_one(row_keys=["a", "b"], row_filter={}, limit=10) # should be able to recreate the instance from the repr diff --git a/tests/unit/test_row.py b/tests/unit/data/test_row.py similarity index 98% rename from tests/unit/test_row.py rename to tests/unit/data/test_row.py index 0413b2889..c9c797b61 100644 --- a/tests/unit/test_row.py +++ b/tests/unit/data/test_row.py @@ -27,7 +27,7 @@ class TestRow(unittest.TestCase): @staticmethod def _get_target_class(): - from google.cloud.bigtable.row import Row + from google.cloud.bigtable.data.row import Row return Row @@ -45,7 +45,7 @@ def _make_cell( timestamp=TEST_TIMESTAMP, labels=TEST_LABELS, ): - from google.cloud.bigtable.row import Cell + from google.cloud.bigtable.data.row import Cell return Cell(value, row_key, family_id, qualifier, timestamp, labels) @@ -223,7 +223,7 @@ def test_to_dict(self): self.assertEqual(column.cells[1].labels, TEST_LABELS) def test_iteration(self): - from google.cloud.bigtable.row import Cell + from google.cloud.bigtable.data.row import Cell # should be able to iterate over the Row as a list cell1 = self._make_cell(value=b"1") @@ -499,7 +499,7 @@ def test_index_of(self): class TestCell(unittest.TestCase): @staticmethod def _get_target_class(): - from google.cloud.bigtable.row import Cell + from google.cloud.bigtable.data.row import Cell return Cell @@ -623,7 +623,7 @@ def test___str__(self): self.assertEqual(str(cell), str(test_value)) def test___repr__(self): - from google.cloud.bigtable.row import Cell # type: ignore # noqa: F401 + from google.cloud.bigtable.data.row 
import Cell # type: ignore # noqa: F401 cell = self._make_one() expected = ( @@ -637,7 +637,7 @@ def test___repr__(self): self.assertEqual(result, cell) def test___repr___no_labels(self): - from google.cloud.bigtable.row import Cell # type: ignore # noqa: F401 + from google.cloud.bigtable.data.row import Cell # type: ignore # noqa: F401 cell_no_labels = self._make_one( TEST_VALUE, diff --git a/tests/unit/test_row_filters.py b/tests/unit/data/test_row_filters.py similarity index 81% rename from tests/unit/test_row_filters.py rename to tests/unit/data/test_row_filters.py index 11ff9f2f1..a3e275e70 100644 --- a/tests/unit/test_row_filters.py +++ b/tests/unit/data/test_row_filters.py @@ -17,10 +17,10 @@ def test_abstract_class_constructors(): - from google.cloud.bigtable.row_filters import RowFilter - from google.cloud.bigtable.row_filters import _BoolFilter - from google.cloud.bigtable.row_filters import _FilterCombination - from google.cloud.bigtable.row_filters import _CellCountFilter + from google.cloud.bigtable.data.row_filters import RowFilter + from google.cloud.bigtable.data.row_filters import _BoolFilter + from google.cloud.bigtable.data.row_filters import _FilterCombination + from google.cloud.bigtable.data.row_filters import _CellCountFilter with pytest.raises(TypeError): RowFilter() @@ -64,7 +64,7 @@ def test_bool_filter___ne__same_value(): def test_sink_filter_to_pb(): - from google.cloud.bigtable.row_filters import SinkFilter + from google.cloud.bigtable.data.row_filters import SinkFilter flag = True row_filter = SinkFilter(flag) @@ -74,7 +74,7 @@ def test_sink_filter_to_pb(): def test_sink_filter_to_dict(): - from google.cloud.bigtable.row_filters import SinkFilter + from google.cloud.bigtable.data.row_filters import SinkFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 flag = True @@ -86,7 +86,7 @@ def test_sink_filter_to_dict(): def test_sink_filter___repr__(): - from google.cloud.bigtable.row_filters import SinkFilter + from google.cloud.bigtable.data.row_filters import SinkFilter flag = True row_filter = SinkFilter(flag) @@ -96,7 +96,7 @@ def test_sink_filter___repr__(): def test_pass_all_filter_to_pb(): - from google.cloud.bigtable.row_filters import PassAllFilter + from google.cloud.bigtable.data.row_filters import PassAllFilter flag = True row_filter = PassAllFilter(flag) @@ -106,7 +106,7 @@ def test_pass_all_filter_to_pb(): def test_pass_all_filter_to_dict(): - from google.cloud.bigtable.row_filters import PassAllFilter + from google.cloud.bigtable.data.row_filters import PassAllFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 flag = True @@ -118,7 +118,7 @@ def test_pass_all_filter_to_dict(): def test_pass_all_filter___repr__(): - from google.cloud.bigtable.row_filters import PassAllFilter + from google.cloud.bigtable.data.row_filters import PassAllFilter flag = True row_filter = PassAllFilter(flag) @@ -128,7 +128,7 @@ def test_pass_all_filter___repr__(): def test_block_all_filter_to_pb(): - from google.cloud.bigtable.row_filters import BlockAllFilter + from google.cloud.bigtable.data.row_filters import BlockAllFilter flag = True row_filter = BlockAllFilter(flag) @@ -138,7 +138,7 @@ def test_block_all_filter_to_pb(): def test_block_all_filter_to_dict(): - from google.cloud.bigtable.row_filters import BlockAllFilter + from google.cloud.bigtable.data.row_filters import BlockAllFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 flag = True @@ -150,7 +150,7 @@ def test_block_all_filter_to_dict(): def 
test_block_all_filter___repr__(): - from google.cloud.bigtable.row_filters import BlockAllFilter + from google.cloud.bigtable.data.row_filters import BlockAllFilter flag = True row_filter = BlockAllFilter(flag) @@ -198,7 +198,7 @@ def test_regex_filter__ne__same_value(): def test_row_key_regex_filter_to_pb(): - from google.cloud.bigtable.row_filters import RowKeyRegexFilter + from google.cloud.bigtable.data.row_filters import RowKeyRegexFilter regex = b"row-key-regex" row_filter = RowKeyRegexFilter(regex) @@ -208,7 +208,7 @@ def test_row_key_regex_filter_to_pb(): def test_row_key_regex_filter_to_dict(): - from google.cloud.bigtable.row_filters import RowKeyRegexFilter + from google.cloud.bigtable.data.row_filters import RowKeyRegexFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 regex = b"row-key-regex" @@ -220,7 +220,7 @@ def test_row_key_regex_filter_to_dict(): def test_row_key_regex_filter___repr__(): - from google.cloud.bigtable.row_filters import RowKeyRegexFilter + from google.cloud.bigtable.data.row_filters import RowKeyRegexFilter regex = b"row-key-regex" row_filter = RowKeyRegexFilter(regex) @@ -230,7 +230,7 @@ def test_row_key_regex_filter___repr__(): def test_row_sample_filter_constructor(): - from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter sample = object() row_filter = RowSampleFilter(sample) @@ -238,7 +238,7 @@ def test_row_sample_filter_constructor(): def test_row_sample_filter___eq__type_differ(): - from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter sample = object() row_filter1 = RowSampleFilter(sample) @@ -247,7 +247,7 @@ def test_row_sample_filter___eq__type_differ(): def test_row_sample_filter___eq__same_value(): - from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter sample = object() row_filter1 = RowSampleFilter(sample) @@ -256,7 +256,7 @@ def test_row_sample_filter___eq__same_value(): def test_row_sample_filter___ne__(): - from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter sample = object() other_sample = object() @@ -266,7 +266,7 @@ def test_row_sample_filter___ne__(): def test_row_sample_filter_to_pb(): - from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter sample = 0.25 row_filter = RowSampleFilter(sample) @@ -276,7 +276,7 @@ def test_row_sample_filter_to_pb(): def test_row_sample_filter___repr__(): - from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter sample = 0.25 row_filter = RowSampleFilter(sample) @@ -286,7 +286,7 @@ def test_row_sample_filter___repr__(): def test_family_name_regex_filter_to_pb(): - from google.cloud.bigtable.row_filters import FamilyNameRegexFilter + from google.cloud.bigtable.data.row_filters import FamilyNameRegexFilter regex = "family-regex" row_filter = FamilyNameRegexFilter(regex) @@ -296,7 +296,7 @@ def test_family_name_regex_filter_to_pb(): def test_family_name_regex_filter_to_dict(): - from google.cloud.bigtable.row_filters import FamilyNameRegexFilter + from google.cloud.bigtable.data.row_filters import FamilyNameRegexFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 regex = "family-regex" @@ -308,7 +308,7 @@ def 
test_family_name_regex_filter_to_dict(): def test_family_name_regex_filter___repr__(): - from google.cloud.bigtable.row_filters import FamilyNameRegexFilter + from google.cloud.bigtable.data.row_filters import FamilyNameRegexFilter regex = "family-regex" row_filter = FamilyNameRegexFilter(regex) @@ -319,7 +319,7 @@ def test_family_name_regex_filter___repr__(): def test_column_qualifier_regex_filter_to_pb(): - from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter + from google.cloud.bigtable.data.row_filters import ColumnQualifierRegexFilter regex = b"column-regex" row_filter = ColumnQualifierRegexFilter(regex) @@ -329,7 +329,7 @@ def test_column_qualifier_regex_filter_to_pb(): def test_column_qualifier_regex_filter_to_dict(): - from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter + from google.cloud.bigtable.data.row_filters import ColumnQualifierRegexFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 regex = b"column-regex" @@ -341,7 +341,7 @@ def test_column_qualifier_regex_filter_to_dict(): def test_column_qualifier_regex_filter___repr__(): - from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter + from google.cloud.bigtable.data.row_filters import ColumnQualifierRegexFilter regex = b"column-regex" row_filter = ColumnQualifierRegexFilter(regex) @@ -351,7 +351,7 @@ def test_column_qualifier_regex_filter___repr__(): def test_timestamp_range_constructor(): - from google.cloud.bigtable.row_filters import TimestampRange + from google.cloud.bigtable.data.row_filters import TimestampRange start = object() end = object() @@ -361,7 +361,7 @@ def test_timestamp_range_constructor(): def test_timestamp_range___eq__(): - from google.cloud.bigtable.row_filters import TimestampRange + from google.cloud.bigtable.data.row_filters import TimestampRange start = object() end = object() @@ -371,7 +371,7 @@ def test_timestamp_range___eq__(): def test_timestamp_range___eq__type_differ(): - from google.cloud.bigtable.row_filters import TimestampRange + from google.cloud.bigtable.data.row_filters import TimestampRange start = object() end = object() @@ -381,7 +381,7 @@ def test_timestamp_range___eq__type_differ(): def test_timestamp_range___ne__same_value(): - from google.cloud.bigtable.row_filters import TimestampRange + from google.cloud.bigtable.data.row_filters import TimestampRange start = object() end = object() @@ -393,7 +393,7 @@ def test_timestamp_range___ne__same_value(): def _timestamp_range_to_pb_helper(pb_kwargs, start=None, end=None): import datetime from google.cloud._helpers import _EPOCH - from google.cloud.bigtable.row_filters import TimestampRange + from google.cloud.bigtable.data.row_filters import TimestampRange if start is not None: start = _EPOCH + datetime.timedelta(microseconds=start) @@ -421,7 +421,7 @@ def test_timestamp_range_to_pb(): def test_timestamp_range_to_dict(): - from google.cloud.bigtable.row_filters import TimestampRange + from google.cloud.bigtable.data.row_filters import TimestampRange from google.cloud.bigtable_v2.types import data as data_v2_pb2 import datetime @@ -448,7 +448,7 @@ def test_timestamp_range_to_pb_start_only(): def test_timestamp_range_to_dict_start_only(): - from google.cloud.bigtable.row_filters import TimestampRange + from google.cloud.bigtable.data.row_filters import TimestampRange from google.cloud.bigtable_v2.types import data as data_v2_pb2 import datetime @@ -470,7 +470,7 @@ def test_timestamp_range_to_pb_end_only(): def test_timestamp_range_to_dict_end_only(): - from 
google.cloud.bigtable.row_filters import TimestampRange + from google.cloud.bigtable.data.row_filters import TimestampRange from google.cloud.bigtable_v2.types import data as data_v2_pb2 import datetime @@ -482,7 +482,7 @@ def test_timestamp_range_to_dict_end_only(): def timestamp_range___repr__(): - from google.cloud.bigtable.row_filters import TimestampRange + from google.cloud.bigtable.data.row_filters import TimestampRange start = object() end = object() @@ -493,7 +493,7 @@ def timestamp_range___repr__(): def test_timestamp_range_filter___eq__type_differ(): - from google.cloud.bigtable.row_filters import TimestampRangeFilter + from google.cloud.bigtable.data.row_filters import TimestampRangeFilter range_ = object() row_filter1 = TimestampRangeFilter(range_) @@ -502,7 +502,7 @@ def test_timestamp_range_filter___eq__type_differ(): def test_timestamp_range_filter___eq__same_value(): - from google.cloud.bigtable.row_filters import TimestampRangeFilter + from google.cloud.bigtable.data.row_filters import TimestampRangeFilter range_ = object() row_filter1 = TimestampRangeFilter(range_) @@ -511,7 +511,7 @@ def test_timestamp_range_filter___eq__same_value(): def test_timestamp_range_filter___ne__(): - from google.cloud.bigtable.row_filters import TimestampRangeFilter + from google.cloud.bigtable.data.row_filters import TimestampRangeFilter range_ = object() other_range_ = object() @@ -521,7 +521,7 @@ def test_timestamp_range_filter___ne__(): def test_timestamp_range_filter_to_pb(): - from google.cloud.bigtable.row_filters import TimestampRangeFilter + from google.cloud.bigtable.data.row_filters import TimestampRangeFilter row_filter = TimestampRangeFilter() pb_val = row_filter._to_pb() @@ -530,7 +530,7 @@ def test_timestamp_range_filter_to_pb(): def test_timestamp_range_filter_to_dict(): - from google.cloud.bigtable.row_filters import TimestampRangeFilter + from google.cloud.bigtable.data.row_filters import TimestampRangeFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 import datetime @@ -549,7 +549,7 @@ def test_timestamp_range_filter_to_dict(): def test_timestamp_range_filter_empty_to_dict(): - from google.cloud.bigtable.row_filters import TimestampRangeFilter + from google.cloud.bigtable.data.row_filters import TimestampRangeFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 row_filter = TimestampRangeFilter() @@ -560,7 +560,7 @@ def test_timestamp_range_filter_empty_to_dict(): def test_timestamp_range_filter___repr__(): - from google.cloud.bigtable.row_filters import TimestampRangeFilter + from google.cloud.bigtable.data.row_filters import TimestampRangeFilter import datetime start = datetime.datetime(2019, 1, 1) @@ -575,7 +575,7 @@ def test_timestamp_range_filter___repr__(): def test_column_range_filter_constructor_defaults(): - from google.cloud.bigtable.row_filters import ColumnRangeFilter + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter family_id = object() row_filter = ColumnRangeFilter(family_id) @@ -587,7 +587,7 @@ def test_column_range_filter_constructor_defaults(): def test_column_range_filter_constructor_explicit(): - from google.cloud.bigtable.row_filters import ColumnRangeFilter + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter family_id = object() start_qualifier = object() @@ -609,7 +609,7 @@ def test_column_range_filter_constructor_explicit(): def test_column_range_filter_constructor_(): - from google.cloud.bigtable.row_filters import ColumnRangeFilter + from 
google.cloud.bigtable.data.row_filters import ColumnRangeFilter family_id = object() with pytest.raises(ValueError): @@ -617,7 +617,7 @@ def test_column_range_filter_constructor_(): def test_column_range_filter_constructor_bad_end(): - from google.cloud.bigtable.row_filters import ColumnRangeFilter + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter family_id = object() with pytest.raises(ValueError): @@ -625,7 +625,7 @@ def test_column_range_filter_constructor_bad_end(): def test_column_range_filter___eq__(): - from google.cloud.bigtable.row_filters import ColumnRangeFilter + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter family_id = object() start_qualifier = object() @@ -650,7 +650,7 @@ def test_column_range_filter___eq__(): def test_column_range_filter___eq__type_differ(): - from google.cloud.bigtable.row_filters import ColumnRangeFilter + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter family_id = object() row_filter1 = ColumnRangeFilter(family_id) @@ -659,7 +659,7 @@ def test_column_range_filter___eq__type_differ(): def test_column_range_filter___ne__(): - from google.cloud.bigtable.row_filters import ColumnRangeFilter + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter family_id = object() other_family_id = object() @@ -685,7 +685,7 @@ def test_column_range_filter___ne__(): def test_column_range_filter_to_pb(): - from google.cloud.bigtable.row_filters import ColumnRangeFilter + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter family_id = "column-family-id" row_filter = ColumnRangeFilter(family_id) @@ -695,7 +695,7 @@ def test_column_range_filter_to_pb(): def test_column_range_filter_to_dict(): - from google.cloud.bigtable.row_filters import ColumnRangeFilter + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 family_id = "column-family-id" @@ -707,7 +707,7 @@ def test_column_range_filter_to_dict(): def test_column_range_filter_to_pb_inclusive_start(): - from google.cloud.bigtable.row_filters import ColumnRangeFilter + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter family_id = "column-family-id" column = b"column" @@ -718,7 +718,7 @@ def test_column_range_filter_to_pb_inclusive_start(): def test_column_range_filter_to_pb_exclusive_start(): - from google.cloud.bigtable.row_filters import ColumnRangeFilter + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter family_id = "column-family-id" column = b"column" @@ -731,7 +731,7 @@ def test_column_range_filter_to_pb_exclusive_start(): def test_column_range_filter_to_pb_inclusive_end(): - from google.cloud.bigtable.row_filters import ColumnRangeFilter + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter family_id = "column-family-id" column = b"column" @@ -742,7 +742,7 @@ def test_column_range_filter_to_pb_inclusive_end(): def test_column_range_filter_to_pb_exclusive_end(): - from google.cloud.bigtable.row_filters import ColumnRangeFilter + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter family_id = "column-family-id" column = b"column" @@ -753,7 +753,7 @@ def test_column_range_filter_to_pb_exclusive_end(): def test_column_range_filter___repr__(): - from google.cloud.bigtable.row_filters import ColumnRangeFilter + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter family_id = "column-family-id" start_qualifier = b"column" @@ -766,7 +766,7 @@ def 
test_column_range_filter___repr__(): def test_value_regex_filter_to_pb_w_bytes(): - from google.cloud.bigtable.row_filters import ValueRegexFilter + from google.cloud.bigtable.data.row_filters import ValueRegexFilter value = regex = b"value-regex" row_filter = ValueRegexFilter(value) @@ -776,7 +776,7 @@ def test_value_regex_filter_to_pb_w_bytes(): def test_value_regex_filter_to_dict_w_bytes(): - from google.cloud.bigtable.row_filters import ValueRegexFilter + from google.cloud.bigtable.data.row_filters import ValueRegexFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 value = regex = b"value-regex" @@ -788,7 +788,7 @@ def test_value_regex_filter_to_dict_w_bytes(): def test_value_regex_filter_to_pb_w_str(): - from google.cloud.bigtable.row_filters import ValueRegexFilter + from google.cloud.bigtable.data.row_filters import ValueRegexFilter value = "value-regex" regex = value.encode("ascii") @@ -799,7 +799,7 @@ def test_value_regex_filter_to_pb_w_str(): def test_value_regex_filter_to_dict_w_str(): - from google.cloud.bigtable.row_filters import ValueRegexFilter + from google.cloud.bigtable.data.row_filters import ValueRegexFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 value = "value-regex" @@ -812,7 +812,7 @@ def test_value_regex_filter_to_dict_w_str(): def test_value_regex_filter___repr__(): - from google.cloud.bigtable.row_filters import ValueRegexFilter + from google.cloud.bigtable.data.row_filters import ValueRegexFilter value = "value-regex" row_filter = ValueRegexFilter(value) @@ -823,7 +823,7 @@ def test_value_regex_filter___repr__(): def test_literal_value_filter_to_pb_w_bytes(): - from google.cloud.bigtable.row_filters import LiteralValueFilter + from google.cloud.bigtable.data.row_filters import LiteralValueFilter value = regex = b"value_regex" row_filter = LiteralValueFilter(value) @@ -833,7 +833,7 @@ def test_literal_value_filter_to_pb_w_bytes(): def test_literal_value_filter_to_dict_w_bytes(): - from google.cloud.bigtable.row_filters import LiteralValueFilter + from google.cloud.bigtable.data.row_filters import LiteralValueFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 value = regex = b"value_regex" @@ -845,7 +845,7 @@ def test_literal_value_filter_to_dict_w_bytes(): def test_literal_value_filter_to_pb_w_str(): - from google.cloud.bigtable.row_filters import LiteralValueFilter + from google.cloud.bigtable.data.row_filters import LiteralValueFilter value = "value_regex" regex = value.encode("ascii") @@ -856,7 +856,7 @@ def test_literal_value_filter_to_pb_w_str(): def test_literal_value_filter_to_dict_w_str(): - from google.cloud.bigtable.row_filters import LiteralValueFilter + from google.cloud.bigtable.data.row_filters import LiteralValueFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 value = "value_regex" @@ -886,7 +886,7 @@ def test_literal_value_filter_to_dict_w_str(): ], ) def test_literal_value_filter_w_int(value, expected_byte_string): - from google.cloud.bigtable.row_filters import LiteralValueFilter + from google.cloud.bigtable.data.row_filters import LiteralValueFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 row_filter = LiteralValueFilter(value) @@ -901,7 +901,7 @@ def test_literal_value_filter_w_int(value, expected_byte_string): def test_literal_value_filter___repr__(): - from google.cloud.bigtable.row_filters import LiteralValueFilter + from google.cloud.bigtable.data.row_filters import LiteralValueFilter value = "value_regex" row_filter = 
LiteralValueFilter(value) @@ -912,7 +912,7 @@ def test_literal_value_filter___repr__(): def test_value_range_filter_constructor_defaults(): - from google.cloud.bigtable.row_filters import ValueRangeFilter + from google.cloud.bigtable.data.row_filters import ValueRangeFilter row_filter = ValueRangeFilter() @@ -923,7 +923,7 @@ def test_value_range_filter_constructor_defaults(): def test_value_range_filter_constructor_explicit(): - from google.cloud.bigtable.row_filters import ValueRangeFilter + from google.cloud.bigtable.data.row_filters import ValueRangeFilter start_value = object() end_value = object() @@ -944,7 +944,7 @@ def test_value_range_filter_constructor_explicit(): def test_value_range_filter_constructor_w_int_values(): - from google.cloud.bigtable.row_filters import ValueRangeFilter + from google.cloud.bigtable.data.row_filters import ValueRangeFilter import struct start_value = 1 @@ -962,21 +962,21 @@ def test_value_range_filter_constructor_w_int_values(): def test_value_range_filter_constructor_bad_start(): - from google.cloud.bigtable.row_filters import ValueRangeFilter + from google.cloud.bigtable.data.row_filters import ValueRangeFilter with pytest.raises(ValueError): ValueRangeFilter(inclusive_start=True) def test_value_range_filter_constructor_bad_end(): - from google.cloud.bigtable.row_filters import ValueRangeFilter + from google.cloud.bigtable.data.row_filters import ValueRangeFilter with pytest.raises(ValueError): ValueRangeFilter(inclusive_end=True) def test_value_range_filter___eq__(): - from google.cloud.bigtable.row_filters import ValueRangeFilter + from google.cloud.bigtable.data.row_filters import ValueRangeFilter start_value = object() end_value = object() @@ -998,7 +998,7 @@ def test_value_range_filter___eq__(): def test_value_range_filter___eq__type_differ(): - from google.cloud.bigtable.row_filters import ValueRangeFilter + from google.cloud.bigtable.data.row_filters import ValueRangeFilter row_filter1 = ValueRangeFilter() row_filter2 = object() @@ -1006,7 +1006,7 @@ def test_value_range_filter___eq__type_differ(): def test_value_range_filter___ne__(): - from google.cloud.bigtable.row_filters import ValueRangeFilter + from google.cloud.bigtable.data.row_filters import ValueRangeFilter start_value = object() other_start_value = object() @@ -1029,7 +1029,7 @@ def test_value_range_filter___ne__(): def test_value_range_filter_to_pb(): - from google.cloud.bigtable.row_filters import ValueRangeFilter + from google.cloud.bigtable.data.row_filters import ValueRangeFilter row_filter = ValueRangeFilter() expected_pb = _RowFilterPB(value_range_filter=_ValueRangePB()) @@ -1037,7 +1037,7 @@ def test_value_range_filter_to_pb(): def test_value_range_filter_to_dict(): - from google.cloud.bigtable.row_filters import ValueRangeFilter + from google.cloud.bigtable.data.row_filters import ValueRangeFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 row_filter = ValueRangeFilter() @@ -1048,7 +1048,7 @@ def test_value_range_filter_to_dict(): def test_value_range_filter_to_pb_inclusive_start(): - from google.cloud.bigtable.row_filters import ValueRangeFilter + from google.cloud.bigtable.data.row_filters import ValueRangeFilter value = b"some-value" row_filter = ValueRangeFilter(start_value=value) @@ -1058,7 +1058,7 @@ def test_value_range_filter_to_pb_inclusive_start(): def test_value_range_filter_to_pb_exclusive_start(): - from google.cloud.bigtable.row_filters import ValueRangeFilter + from google.cloud.bigtable.data.row_filters import ValueRangeFilter value = 
b"some-value" row_filter = ValueRangeFilter(start_value=value, inclusive_start=False) @@ -1068,7 +1068,7 @@ def test_value_range_filter_to_pb_exclusive_start(): def test_value_range_filter_to_pb_inclusive_end(): - from google.cloud.bigtable.row_filters import ValueRangeFilter + from google.cloud.bigtable.data.row_filters import ValueRangeFilter value = b"some-value" row_filter = ValueRangeFilter(end_value=value) @@ -1078,7 +1078,7 @@ def test_value_range_filter_to_pb_inclusive_end(): def test_value_range_filter_to_pb_exclusive_end(): - from google.cloud.bigtable.row_filters import ValueRangeFilter + from google.cloud.bigtable.data.row_filters import ValueRangeFilter value = b"some-value" row_filter = ValueRangeFilter(end_value=value, inclusive_end=False) @@ -1088,7 +1088,7 @@ def test_value_range_filter_to_pb_exclusive_end(): def test_value_range_filter___repr__(): - from google.cloud.bigtable.row_filters import ValueRangeFilter + from google.cloud.bigtable.data.row_filters import ValueRangeFilter start_value = b"some-value" end_value = b"some-other-value" @@ -1133,7 +1133,7 @@ def test_cell_count___ne__same_value(): def test_cells_row_offset_filter_to_pb(): - from google.cloud.bigtable.row_filters import CellsRowOffsetFilter + from google.cloud.bigtable.data.row_filters import CellsRowOffsetFilter num_cells = 76 row_filter = CellsRowOffsetFilter(num_cells) @@ -1143,7 +1143,7 @@ def test_cells_row_offset_filter_to_pb(): def test_cells_row_offset_filter_to_dict(): - from google.cloud.bigtable.row_filters import CellsRowOffsetFilter + from google.cloud.bigtable.data.row_filters import CellsRowOffsetFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 num_cells = 76 @@ -1155,7 +1155,7 @@ def test_cells_row_offset_filter_to_dict(): def test_cells_row_offset_filter___repr__(): - from google.cloud.bigtable.row_filters import CellsRowOffsetFilter + from google.cloud.bigtable.data.row_filters import CellsRowOffsetFilter num_cells = 76 row_filter = CellsRowOffsetFilter(num_cells) @@ -1166,7 +1166,7 @@ def test_cells_row_offset_filter___repr__(): def test_cells_row_limit_filter_to_pb(): - from google.cloud.bigtable.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter num_cells = 189 row_filter = CellsRowLimitFilter(num_cells) @@ -1176,7 +1176,7 @@ def test_cells_row_limit_filter_to_pb(): def test_cells_row_limit_filter_to_dict(): - from google.cloud.bigtable.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 num_cells = 189 @@ -1188,7 +1188,7 @@ def test_cells_row_limit_filter_to_dict(): def test_cells_row_limit_filter___repr__(): - from google.cloud.bigtable.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter num_cells = 189 row_filter = CellsRowLimitFilter(num_cells) @@ -1199,7 +1199,7 @@ def test_cells_row_limit_filter___repr__(): def test_cells_column_limit_filter_to_pb(): - from google.cloud.bigtable.row_filters import CellsColumnLimitFilter + from google.cloud.bigtable.data.row_filters import CellsColumnLimitFilter num_cells = 10 row_filter = CellsColumnLimitFilter(num_cells) @@ -1209,7 +1209,7 @@ def test_cells_column_limit_filter_to_pb(): def test_cells_column_limit_filter_to_dict(): - from google.cloud.bigtable.row_filters import CellsColumnLimitFilter + from google.cloud.bigtable.data.row_filters import CellsColumnLimitFilter from 
google.cloud.bigtable_v2.types import data as data_v2_pb2 num_cells = 10 @@ -1221,7 +1221,7 @@ def test_cells_column_limit_filter_to_dict(): def test_cells_column_limit_filter___repr__(): - from google.cloud.bigtable.row_filters import CellsColumnLimitFilter + from google.cloud.bigtable.data.row_filters import CellsColumnLimitFilter num_cells = 10 row_filter = CellsColumnLimitFilter(num_cells) @@ -1232,7 +1232,7 @@ def test_cells_column_limit_filter___repr__(): def test_strip_value_transformer_filter_to_pb(): - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter flag = True row_filter = StripValueTransformerFilter(flag) @@ -1242,7 +1242,7 @@ def test_strip_value_transformer_filter_to_pb(): def test_strip_value_transformer_filter_to_dict(): - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 flag = True @@ -1254,7 +1254,7 @@ def test_strip_value_transformer_filter_to_dict(): def test_strip_value_transformer_filter___repr__(): - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter flag = True row_filter = StripValueTransformerFilter(flag) @@ -1265,7 +1265,7 @@ def test_strip_value_transformer_filter___repr__(): def test_apply_label_filter_constructor(): - from google.cloud.bigtable.row_filters import ApplyLabelFilter + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter label = object() row_filter = ApplyLabelFilter(label) @@ -1273,7 +1273,7 @@ def test_apply_label_filter_constructor(): def test_apply_label_filter___eq__type_differ(): - from google.cloud.bigtable.row_filters import ApplyLabelFilter + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter label = object() row_filter1 = ApplyLabelFilter(label) @@ -1282,7 +1282,7 @@ def test_apply_label_filter___eq__type_differ(): def test_apply_label_filter___eq__same_value(): - from google.cloud.bigtable.row_filters import ApplyLabelFilter + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter label = object() row_filter1 = ApplyLabelFilter(label) @@ -1291,7 +1291,7 @@ def test_apply_label_filter___eq__same_value(): def test_apply_label_filter___ne__(): - from google.cloud.bigtable.row_filters import ApplyLabelFilter + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter label = object() other_label = object() @@ -1301,7 +1301,7 @@ def test_apply_label_filter___ne__(): def test_apply_label_filter_to_pb(): - from google.cloud.bigtable.row_filters import ApplyLabelFilter + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter label = "label" row_filter = ApplyLabelFilter(label) @@ -1311,7 +1311,7 @@ def test_apply_label_filter_to_pb(): def test_apply_label_filter_to_dict(): - from google.cloud.bigtable.row_filters import ApplyLabelFilter + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 label = "label" @@ -1323,7 +1323,7 @@ def test_apply_label_filter_to_dict(): def test_apply_label_filter___repr__(): - from google.cloud.bigtable.row_filters import ApplyLabelFilter + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter label = "label" row_filter = ApplyLabelFilter(label) @@ -1399,7 +1399,7 @@ def 
test_filter_combination___getitem__(): def test_filter_combination___str__(): - from google.cloud.bigtable.row_filters import PassAllFilter + from google.cloud.bigtable.data.row_filters import PassAllFilter for FilterType in _get_filter_combination_filters(): filters = [PassAllFilter(True), PassAllFilter(False)] @@ -1411,9 +1411,9 @@ def test_filter_combination___str__(): def test_row_filter_chain_to_pb(): - from google.cloud.bigtable.row_filters import RowFilterChain - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import RowFilterChain + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter1_pb = row_filter1._to_pb() @@ -1431,9 +1431,9 @@ def test_row_filter_chain_to_pb(): def test_row_filter_chain_to_dict(): - from google.cloud.bigtable.row_filters import RowFilterChain - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import RowFilterChain + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 row_filter1 = StripValueTransformerFilter(True) @@ -1452,10 +1452,10 @@ def test_row_filter_chain_to_dict(): def test_row_filter_chain_to_pb_nested(): - from google.cloud.bigtable.row_filters import CellsRowLimitFilter - from google.cloud.bigtable.row_filters import RowFilterChain - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.data.row_filters import RowFilterChain + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter2 = RowSampleFilter(0.25) @@ -1476,10 +1476,10 @@ def test_row_filter_chain_to_pb_nested(): def test_row_filter_chain_to_dict_nested(): - from google.cloud.bigtable.row_filters import CellsRowLimitFilter - from google.cloud.bigtable.row_filters import RowFilterChain - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.data.row_filters import RowFilterChain + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 row_filter1 = StripValueTransformerFilter(True) @@ -1502,9 +1502,9 @@ def test_row_filter_chain_to_dict_nested(): def test_row_filter_chain___repr__(): - from google.cloud.bigtable.row_filters import RowFilterChain - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import RowFilterChain + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from 
google.cloud.bigtable.data.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter2 = RowSampleFilter(0.25) @@ -1516,9 +1516,9 @@ def test_row_filter_chain___repr__(): def test_row_filter_chain___str__(): - from google.cloud.bigtable.row_filters import RowFilterChain - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import RowFilterChain + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter2 = RowSampleFilter(0.25) @@ -1533,9 +1533,9 @@ def test_row_filter_chain___str__(): def test_row_filter_union_to_pb(): - from google.cloud.bigtable.row_filters import RowFilterUnion - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import RowFilterUnion + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter1_pb = row_filter1._to_pb() @@ -1553,9 +1553,9 @@ def test_row_filter_union_to_pb(): def test_row_filter_union_to_dict(): - from google.cloud.bigtable.row_filters import RowFilterUnion - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import RowFilterUnion + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 row_filter1 = StripValueTransformerFilter(True) @@ -1574,10 +1574,10 @@ def test_row_filter_union_to_dict(): def test_row_filter_union_to_pb_nested(): - from google.cloud.bigtable.row_filters import CellsRowLimitFilter - from google.cloud.bigtable.row_filters import RowFilterUnion - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.data.row_filters import RowFilterUnion + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter2 = RowSampleFilter(0.25) @@ -1598,10 +1598,10 @@ def test_row_filter_union_to_pb_nested(): def test_row_filter_union_to_dict_nested(): - from google.cloud.bigtable.row_filters import CellsRowLimitFilter - from google.cloud.bigtable.row_filters import RowFilterUnion - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.data.row_filters import RowFilterUnion + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 row_filter1 = StripValueTransformerFilter(True) @@ -1624,9 +1624,9 @@ def 
test_row_filter_union_to_dict_nested(): def test_row_filter_union___repr__(): - from google.cloud.bigtable.row_filters import RowFilterUnion - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import RowFilterUnion + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter2 = RowSampleFilter(0.25) @@ -1638,9 +1638,9 @@ def test_row_filter_union___repr__(): def test_row_filter_union___str__(): - from google.cloud.bigtable.row_filters import RowFilterUnion - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import RowFilterUnion + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter2 = RowSampleFilter(0.25) @@ -1655,7 +1655,7 @@ def test_row_filter_union___str__(): def test_conditional_row_filter_constructor(): - from google.cloud.bigtable.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter predicate_filter = object() true_filter = object() @@ -1669,7 +1669,7 @@ def test_conditional_row_filter_constructor(): def test_conditional_row_filter___eq__(): - from google.cloud.bigtable.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter predicate_filter = object() true_filter = object() @@ -1684,7 +1684,7 @@ def test_conditional_row_filter___eq__(): def test_conditional_row_filter___eq__type_differ(): - from google.cloud.bigtable.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter predicate_filter = object() true_filter = object() @@ -1697,7 +1697,7 @@ def test_conditional_row_filter___eq__type_differ(): def test_conditional_row_filter___ne__(): - from google.cloud.bigtable.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter predicate_filter = object() other_predicate_filter = object() @@ -1713,10 +1713,10 @@ def test_conditional_row_filter___ne__(): def test_conditional_row_filter_to_pb(): - from google.cloud.bigtable.row_filters import ConditionalRowFilter - from google.cloud.bigtable.row_filters import CellsRowOffsetFilter - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import CellsRowOffsetFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter1_pb = row_filter1._to_pb() @@ -1743,10 +1743,10 @@ def test_conditional_row_filter_to_pb(): def test_conditional_row_filter_to_dict(): - from google.cloud.bigtable.row_filters import ConditionalRowFilter - from google.cloud.bigtable.row_filters import CellsRowOffsetFilter - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import 
StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import CellsRowOffsetFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 row_filter1 = StripValueTransformerFilter(True) @@ -1776,9 +1776,9 @@ def test_conditional_row_filter_to_dict(): def test_conditional_row_filter_to_pb_true_only(): - from google.cloud.bigtable.row_filters import ConditionalRowFilter - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter1_pb = row_filter1._to_pb() @@ -1798,9 +1798,9 @@ def test_conditional_row_filter_to_pb_true_only(): def test_conditional_row_filter_to_dict_true_only(): - from google.cloud.bigtable.row_filters import ConditionalRowFilter - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 row_filter1 = StripValueTransformerFilter(True) @@ -1824,9 +1824,9 @@ def test_conditional_row_filter_to_dict_true_only(): def test_conditional_row_filter_to_pb_false_only(): - from google.cloud.bigtable.row_filters import ConditionalRowFilter - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter1_pb = row_filter1._to_pb() @@ -1846,9 +1846,9 @@ def test_conditional_row_filter_to_pb_false_only(): def test_conditional_row_filter_to_dict_false_only(): - from google.cloud.bigtable.row_filters import ConditionalRowFilter - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter from google.cloud.bigtable_v2.types import data as data_v2_pb2 row_filter1 = StripValueTransformerFilter(True) @@ -1872,9 +1872,9 @@ def test_conditional_row_filter_to_dict_false_only(): def test_conditional_row_filter___repr__(): - from google.cloud.bigtable.row_filters import ConditionalRowFilter - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from 
google.cloud.bigtable.data.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter2 = RowSampleFilter(0.25) @@ -1893,10 +1893,10 @@ def test_conditional_row_filter___repr__(): def test_conditional_row_filter___str__(): - from google.cloud.bigtable.row_filters import ConditionalRowFilter - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import RowFilterUnion - from google.cloud.bigtable.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import RowFilterUnion + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter2 = RowSampleFilter(0.25) @@ -1931,7 +1931,7 @@ def test_conditional_row_filter___str__(): ], ) def test_literal_value__write_literal_regex(input_arg, expected_bytes): - from google.cloud.bigtable.row_filters import LiteralValueFilter + from google.cloud.bigtable.data.row_filters import LiteralValueFilter filter_ = LiteralValueFilter(input_arg) assert filter_.regex == expected_bytes @@ -1980,7 +1980,7 @@ def _ValueRangePB(*args, **kw): def _get_regex_filters(): - from google.cloud.bigtable.row_filters import ( + from google.cloud.bigtable.data.row_filters import ( RowKeyRegexFilter, FamilyNameRegexFilter, ColumnQualifierRegexFilter, @@ -1998,7 +1998,7 @@ def _get_regex_filters(): def _get_bool_filters(): - from google.cloud.bigtable.row_filters import ( + from google.cloud.bigtable.data.row_filters import ( SinkFilter, PassAllFilter, BlockAllFilter, @@ -2014,7 +2014,7 @@ def _get_bool_filters(): def _get_cell_count_filters(): - from google.cloud.bigtable.row_filters import ( + from google.cloud.bigtable.data.row_filters import ( CellsRowLimitFilter, CellsRowOffsetFilter, CellsColumnLimitFilter, @@ -2028,7 +2028,7 @@ def _get_cell_count_filters(): def _get_filter_combination_filters(): - from google.cloud.bigtable.row_filters import ( + from google.cloud.bigtable.data.row_filters import ( RowFilterChain, RowFilterUnion, ) diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 8e4004ab1..8498e4fa5 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -8202,6 +8202,7 @@ def test_update_table_rest(request_type): "source_table": "source_table_value", }, }, + "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, "deletion_protection": True, } request = request_type(**request_init) @@ -8399,6 +8400,7 @@ def test_update_table_rest_bad_request( "source_table": "source_table_value", }, }, + "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, "deletion_protection": True, } request = request_type(**request_init) diff --git a/tests/unit/gapic/bigtable_v2/test_bigtable.py b/tests/unit/gapic/bigtable_v2/test_bigtable.py index b1500aa48..03ba3044f 100644 --- a/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -100,7 +100,6 @@ def test__get_default_mtls_endpoint(): [ (BigtableClient, "grpc"), (BigtableAsyncClient, "grpc_asyncio"), - (BigtableAsyncClient, "pooled_grpc_asyncio"), (BigtableClient, "rest"), ], ) @@ -117,7 +116,7 @@ def 
test_bigtable_client_from_service_account_info(client_class, transport_name) assert client.transport._host == ( "bigtable.googleapis.com:443" - if transport_name in ["grpc", "grpc_asyncio", "pooled_grpc_asyncio"] + if transport_name in ["grpc", "grpc_asyncio"] else "https://bigtable.googleapis.com" ) @@ -127,7 +126,6 @@ def test_bigtable_client_from_service_account_info(client_class, transport_name) [ (transports.BigtableGrpcTransport, "grpc"), (transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"), - (transports.PooledBigtableGrpcAsyncIOTransport, "pooled_grpc_asyncio"), (transports.BigtableRestTransport, "rest"), ], ) @@ -154,7 +152,6 @@ def test_bigtable_client_service_account_always_use_jwt( [ (BigtableClient, "grpc"), (BigtableAsyncClient, "grpc_asyncio"), - (BigtableAsyncClient, "pooled_grpc_asyncio"), (BigtableClient, "rest"), ], ) @@ -178,7 +175,7 @@ def test_bigtable_client_from_service_account_file(client_class, transport_name) assert client.transport._host == ( "bigtable.googleapis.com:443" - if transport_name in ["grpc", "grpc_asyncio", "pooled_grpc_asyncio"] + if transport_name in ["grpc", "grpc_asyncio"] else "https://bigtable.googleapis.com" ) @@ -200,11 +197,6 @@ def test_bigtable_client_get_transport_class(): [ (BigtableClient, transports.BigtableGrpcTransport, "grpc"), (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"), - ( - BigtableAsyncClient, - transports.PooledBigtableGrpcAsyncIOTransport, - "pooled_grpc_asyncio", - ), (BigtableClient, transports.BigtableRestTransport, "rest"), ], ) @@ -340,12 +332,6 @@ def test_bigtable_client_client_options(client_class, transport_class, transport "grpc_asyncio", "true", ), - ( - BigtableAsyncClient, - transports.PooledBigtableGrpcAsyncIOTransport, - "pooled_grpc_asyncio", - "true", - ), (BigtableClient, transports.BigtableGrpcTransport, "grpc", "false"), ( BigtableAsyncClient, @@ -353,12 +339,6 @@ def test_bigtable_client_client_options(client_class, transport_class, transport "grpc_asyncio", "false", ), - ( - BigtableAsyncClient, - transports.PooledBigtableGrpcAsyncIOTransport, - "pooled_grpc_asyncio", - "false", - ), (BigtableClient, transports.BigtableRestTransport, "rest", "true"), (BigtableClient, transports.BigtableRestTransport, "rest", "false"), ], @@ -550,11 +530,6 @@ def test_bigtable_client_get_mtls_endpoint_and_cert_source(client_class): [ (BigtableClient, transports.BigtableGrpcTransport, "grpc"), (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"), - ( - BigtableAsyncClient, - transports.PooledBigtableGrpcAsyncIOTransport, - "pooled_grpc_asyncio", - ), (BigtableClient, transports.BigtableRestTransport, "rest"), ], ) @@ -591,12 +566,6 @@ def test_bigtable_client_client_options_scopes( "grpc_asyncio", grpc_helpers_async, ), - ( - BigtableAsyncClient, - transports.PooledBigtableGrpcAsyncIOTransport, - "pooled_grpc_asyncio", - grpc_helpers_async, - ), (BigtableClient, transports.BigtableRestTransport, "rest", None), ], ) @@ -743,35 +712,6 @@ def test_read_rows(request_type, transport: str = "grpc"): assert isinstance(message, bigtable.ReadRowsResponse) -def test_read_rows_pooled_rotation(transport: str = "pooled_grpc_asyncio"): - with mock.patch.object( - transports.pooled_grpc_asyncio.PooledChannel, "next_channel" - ) as next_channel: - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just 
send an empty request. - request = {} - - channel = client.transport._grpc_channel._pool[ - client.transport._grpc_channel._next_idx - ] - next_channel.return_value = channel - - response = client.read_rows(request) - - # Establish that next_channel was called - next_channel.assert_called_once() - # Establish that subsequent calls all call next_channel - starting_idx = client.transport._grpc_channel._next_idx - for i in range(2, 10): - response = client.read_rows(request) - assert next_channel.call_count == i - - def test_read_rows_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -991,35 +931,6 @@ def test_sample_row_keys(request_type, transport: str = "grpc"): assert isinstance(message, bigtable.SampleRowKeysResponse) -def test_sample_row_keys_pooled_rotation(transport: str = "pooled_grpc_asyncio"): - with mock.patch.object( - transports.pooled_grpc_asyncio.PooledChannel, "next_channel" - ) as next_channel: - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = {} - - channel = client.transport._grpc_channel._pool[ - client.transport._grpc_channel._next_idx - ] - next_channel.return_value = channel - - response = client.sample_row_keys(request) - - # Establish that next_channel was called - next_channel.assert_called_once() - # Establish that subsequent calls all call next_channel - starting_idx = client.transport._grpc_channel._next_idx - for i in range(2, 10): - response = client.sample_row_keys(request) - assert next_channel.call_count == i - - def test_sample_row_keys_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -1238,35 +1149,6 @@ def test_mutate_row(request_type, transport: str = "grpc"): assert isinstance(response, bigtable.MutateRowResponse) -def test_mutate_row_pooled_rotation(transport: str = "pooled_grpc_asyncio"): - with mock.patch.object( - transports.pooled_grpc_asyncio.PooledChannel, "next_channel" - ) as next_channel: - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = {} - - channel = client.transport._grpc_channel._pool[ - client.transport._grpc_channel._next_idx - ] - next_channel.return_value = channel - - response = client.mutate_row(request) - - # Establish that next_channel was called - next_channel.assert_called_once() - # Establish that subsequent calls all call next_channel - starting_idx = client.transport._grpc_channel._next_idx - for i in range(2, 10): - response = client.mutate_row(request) - assert next_channel.call_count == i - - def test_mutate_row_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
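Each of the deleted *_pooled_rotation tests above exercised the same behaviour: the pooled asyncio transport kept a list of channels and handed out the next one on every RPC, wrapping around at the end of the pool. The following is a minimal, illustrative sketch of that round-robin idea; SimplePool and its attribute names are stand-ins chosen for this example and are not the library's PooledChannel implementation.

# Illustrative sketch of the round-robin channel selection that the removed
# *_pooled_rotation tests asserted; SimplePool is a hypothetical stand-in,
# not the real PooledChannel class from the transport module.
class SimplePool:
    def __init__(self, channels):
        self._pool = list(channels)   # pool of (mock) channel objects
        self._next_idx = 0            # index of the channel to hand out next

    def next_channel(self):
        channel = self._pool[self._next_idx]
        # advance and wrap around, so successive calls rotate through the pool
        self._next_idx = (self._next_idx + 1) % len(self._pool)
        return channel


pool = SimplePool(channels=[object() for _ in range(3)])
seen = [pool.next_channel() for _ in range(6)]
# two full passes over a pool of three: the second pass repeats the first
assert seen[:3] == seen[3:]
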
@@ -1530,35 +1412,6 @@ def test_mutate_rows(request_type, transport: str = "grpc"): assert isinstance(message, bigtable.MutateRowsResponse) -def test_mutate_rows_pooled_rotation(transport: str = "pooled_grpc_asyncio"): - with mock.patch.object( - transports.pooled_grpc_asyncio.PooledChannel, "next_channel" - ) as next_channel: - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = {} - - channel = client.transport._grpc_channel._pool[ - client.transport._grpc_channel._next_idx - ] - next_channel.return_value = channel - - response = client.mutate_rows(request) - - # Establish that next_channel was called - next_channel.assert_called_once() - # Establish that subsequent calls all call next_channel - starting_idx = client.transport._grpc_channel._next_idx - for i in range(2, 10): - response = client.mutate_rows(request) - assert next_channel.call_count == i - - def test_mutate_rows_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -1792,35 +1645,6 @@ def test_check_and_mutate_row(request_type, transport: str = "grpc"): assert response.predicate_matched is True -def test_check_and_mutate_row_pooled_rotation(transport: str = "pooled_grpc_asyncio"): - with mock.patch.object( - transports.pooled_grpc_asyncio.PooledChannel, "next_channel" - ) as next_channel: - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = {} - - channel = client.transport._grpc_channel._pool[ - client.transport._grpc_channel._next_idx - ] - next_channel.return_value = channel - - response = client.check_and_mutate_row(request) - - # Establish that next_channel was called - next_channel.assert_called_once() - # Establish that subsequent calls all call next_channel - starting_idx = client.transport._grpc_channel._next_idx - for i in range(2, 10): - response = client.check_and_mutate_row(request) - assert next_channel.call_count == i - - def test_check_and_mutate_row_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -2198,35 +2022,6 @@ def test_ping_and_warm(request_type, transport: str = "grpc"): assert isinstance(response, bigtable.PingAndWarmResponse) -def test_ping_and_warm_pooled_rotation(transport: str = "pooled_grpc_asyncio"): - with mock.patch.object( - transports.pooled_grpc_asyncio.PooledChannel, "next_channel" - ) as next_channel: - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = {} - - channel = client.transport._grpc_channel._pool[ - client.transport._grpc_channel._next_idx - ] - next_channel.return_value = channel - - response = client.ping_and_warm(request) - - # Establish that next_channel was called - next_channel.assert_called_once() - # Establish that subsequent calls all call next_channel - starting_idx = client.transport._grpc_channel._next_idx - for i in range(2, 10): - response = client.ping_and_warm(request) - assert next_channel.call_count == i - - def test_ping_and_warm_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -2447,35 +2242,6 @@ def test_read_modify_write_row(request_type, transport: str = "grpc"): assert isinstance(response, bigtable.ReadModifyWriteRowResponse) -def test_read_modify_write_row_pooled_rotation(transport: str = "pooled_grpc_asyncio"): - with mock.patch.object( - transports.pooled_grpc_asyncio.PooledChannel, "next_channel" - ) as next_channel: - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = {} - - channel = client.transport._grpc_channel._pool[ - client.transport._grpc_channel._next_idx - ] - next_channel.return_value = channel - - response = client.read_modify_write_row(request) - - # Establish that next_channel was called - next_channel.assert_called_once() - # Establish that subsequent calls all call next_channel - starting_idx = client.transport._grpc_channel._next_idx - for i in range(2, 10): - response = client.read_modify_write_row(request) - assert next_channel.call_count == i - - def test_read_modify_write_row_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -2735,37 +2501,6 @@ def test_generate_initial_change_stream_partitions( ) -def test_generate_initial_change_stream_partitions_pooled_rotation( - transport: str = "pooled_grpc_asyncio", -): - with mock.patch.object( - transports.pooled_grpc_asyncio.PooledChannel, "next_channel" - ) as next_channel: - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = {} - - channel = client.transport._grpc_channel._pool[ - client.transport._grpc_channel._next_idx - ] - next_channel.return_value = channel - - response = client.generate_initial_change_stream_partitions(request) - - # Establish that next_channel was called - next_channel.assert_called_once() - # Establish that subsequent calls all call next_channel - starting_idx = client.transport._grpc_channel._next_idx - for i in range(2, 10): - response = client.generate_initial_change_stream_partitions(request) - assert next_channel.call_count == i - - def test_generate_initial_change_stream_partitions_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
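The removed rotation tests also shared a single unittest.mock pattern: patch the channel-selection method on the class, issue repeated requests, and assert that the patched method's call count advances by one per request. A generic sketch of that call-counting pattern follows; Pool and do_rpc are hypothetical names used only for illustration, not the client's actual API.

# Generic sketch of the call-counting pattern used by the removed rotation
# tests; Pool and do_rpc are hypothetical stand-ins for the patched transport
# method and the client RPC.
from unittest import mock


class Pool:
    def next_channel(self):
        return object()


def do_rpc(pool):
    # every "RPC" asks the pool for a channel, mirroring what the tests mocked
    return pool.next_channel()


with mock.patch.object(Pool, "next_channel") as next_channel:
    pool = Pool()
    do_rpc(pool)
    next_channel.assert_called_once()        # first request selects a channel
    for i in range(2, 10):
        do_rpc(pool)
        assert next_channel.call_count == i  # each later request selects again
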
@@ -3025,35 +2760,6 @@ def test_read_change_stream(request_type, transport: str = "grpc"): assert isinstance(message, bigtable.ReadChangeStreamResponse) -def test_read_change_stream_pooled_rotation(transport: str = "pooled_grpc_asyncio"): - with mock.patch.object( - transports.pooled_grpc_asyncio.PooledChannel, "next_channel" - ) as next_channel: - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = {} - - channel = client.transport._grpc_channel._pool[ - client.transport._grpc_channel._next_idx - ] - next_channel.return_value = channel - - response = client.read_change_stream(request) - - # Establish that next_channel was called - next_channel.assert_called_once() - # Establish that subsequent calls all call next_channel - starting_idx = client.transport._grpc_channel._next_idx - for i in range(2, 10): - response = client.read_change_stream(request) - assert next_channel.call_count == i - - def test_read_change_stream_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -5957,7 +5663,6 @@ def test_transport_get_channel(): [ transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport, - transports.PooledBigtableGrpcAsyncIOTransport, transports.BigtableRestTransport, ], ) @@ -6105,7 +5810,6 @@ def test_bigtable_auth_adc(): [ transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport, - transports.PooledBigtableGrpcAsyncIOTransport, ], ) def test_bigtable_transport_auth_adc(transport_class): @@ -6133,7 +5837,6 @@ def test_bigtable_transport_auth_adc(transport_class): [ transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport, - transports.PooledBigtableGrpcAsyncIOTransport, transports.BigtableRestTransport, ], ) @@ -6236,61 +5939,6 @@ def test_bigtable_grpc_transport_client_cert_source_for_mtls(transport_class): ) -@pytest.mark.parametrize( - "transport_class", [transports.PooledBigtableGrpcAsyncIOTransport] -) -def test_bigtable_pooled_grpc_transport_client_cert_source_for_mtls(transport_class): - cred = ga_credentials.AnonymousCredentials() - - # test with invalid pool size - with pytest.raises(ValueError): - transport_class( - host="squid.clam.whelk", - credentials=cred, - pool_size=0, - ) - - # Check ssl_channel_credentials is used if provided. - for pool_num in range(1, 5): - with mock.patch.object( - transport_class, "create_channel" - ) as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds, - pool_size=pool_num, - ) - mock_create_channel.assert_called_with( - pool_num, - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert mock_create_channel.call_count == 1 - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback, - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, private_key=expected_key - ) - - def test_bigtable_http_transport_client_cert_source_for_mtls(): cred = ga_credentials.AnonymousCredentials() with mock.patch( @@ -6307,7 +5955,6 @@ def test_bigtable_http_transport_client_cert_source_for_mtls(): [ "grpc", "grpc_asyncio", - "pooled_grpc_asyncio", "rest", ], ) @@ -6321,7 +5968,7 @@ def test_bigtable_host_no_port(transport_name): ) assert client.transport._host == ( "bigtable.googleapis.com:443" - if transport_name in ["grpc", "grpc_asyncio", "pooled_grpc_asyncio"] + if transport_name in ["grpc", "grpc_asyncio"] else "https://bigtable.googleapis.com" ) @@ -6331,7 +5978,6 @@ def test_bigtable_host_no_port(transport_name): [ "grpc", "grpc_asyncio", - "pooled_grpc_asyncio", "rest", ], ) @@ -6345,7 +5991,7 @@ def test_bigtable_host_with_port(transport_name): ) assert client.transport._host == ( "bigtable.googleapis.com:8000" - if transport_name in ["grpc", "grpc_asyncio", "pooled_grpc_asyncio"] + if transport_name in ["grpc", "grpc_asyncio"] else "https://bigtable.googleapis.com:8000" ) @@ -6701,24 +6347,6 @@ async def test_transport_close_async(): async with client: close.assert_not_called() close.assert_called_once() - close.assert_awaited() - - -@pytest.mark.asyncio -async def test_pooled_transport_close_async(): - client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="pooled_grpc_asyncio", - ) - num_channels = len(client.transport._grpc_channel._pool) - with mock.patch.object( - type(client.transport._grpc_channel._pool[0]), "close" - ) as close: - async with client: - close.assert_not_called() - close.assert_called() - assert close.call_count == num_channels - close.assert_awaited() def test_transport_close(): @@ -6785,128 +6413,3 @@ def test_api_key_credentials(client_class, transport_class): always_use_jwt_access=True, api_audience=None, ) - - -@pytest.mark.asyncio -async def test_pooled_transport_replace_default(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="pooled_grpc_asyncio", - ) - num_channels = len(client.transport._grpc_channel._pool) - for replace_idx in range(num_channels): - prev_pool = [channel for channel in client.transport._grpc_channel._pool] - grace_period = 4 - with mock.patch.object( - type(client.transport._grpc_channel._pool[0]), "close" - ) as close: - await client.transport.replace_channel(replace_idx, grace=grace_period) - close.assert_called_once() - close.assert_awaited() - close.assert_called_with(grace=grace_period) - assert isinstance( - client.transport._grpc_channel._pool[replace_idx], grpc.aio.Channel - ) - # only the specified channel should be replaced - for i in range(num_channels): - if i == replace_idx: - assert client.transport._grpc_channel._pool[i] != prev_pool[i] - else: - assert client.transport._grpc_channel._pool[i] == prev_pool[i] - with pytest.raises(ValueError): - await client.transport.replace_channel(num_channels + 1) - with pytest.raises(ValueError): - await client.transport.replace_channel(-1) - - -@pytest.mark.asyncio -async def test_pooled_transport_replace_explicit(): - client = BigtableClient( - 
credentials=ga_credentials.AnonymousCredentials(), - transport="pooled_grpc_asyncio", - ) - num_channels = len(client.transport._grpc_channel._pool) - for replace_idx in range(num_channels): - prev_pool = [channel for channel in client.transport._grpc_channel._pool] - grace_period = 0 - with mock.patch.object( - type(client.transport._grpc_channel._pool[0]), "close" - ) as close: - new_channel = grpc.aio.insecure_channel("localhost:8080") - await client.transport.replace_channel( - replace_idx, grace=grace_period, new_channel=new_channel - ) - close.assert_called_once() - close.assert_awaited() - close.assert_called_with(grace=grace_period) - assert client.transport._grpc_channel._pool[replace_idx] == new_channel - # only the specified channel should be replaced - for i in range(num_channels): - if i == replace_idx: - assert client.transport._grpc_channel._pool[i] != prev_pool[i] - else: - assert client.transport._grpc_channel._pool[i] == prev_pool[i] - - -def test_pooled_transport_next_channel(): - num_channels = 10 - transport = transports.PooledBigtableGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - pool_size=num_channels, - ) - assert len(transport._grpc_channel._pool) == num_channels - transport._grpc_channel._next_idx = 0 - # rotate through all channels multiple times - num_cycles = 4 - for _ in range(num_cycles): - for i in range(num_channels - 1): - assert transport._grpc_channel._next_idx == i - got_channel = transport._grpc_channel.next_channel() - assert got_channel == transport._grpc_channel._pool[i] - assert transport._grpc_channel._next_idx == (i + 1) - # test wrap around - assert transport._grpc_channel._next_idx == num_channels - 1 - got_channel = transport._grpc_channel.next_channel() - assert got_channel == transport._grpc_channel._pool[num_channels - 1] - assert transport._grpc_channel._next_idx == 0 - - -def test_pooled_transport_pool_unique_channels(): - num_channels = 50 - - transport = transports.PooledBigtableGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - pool_size=num_channels, - ) - channel_list = [channel for channel in transport._grpc_channel._pool] - channel_set = set(channel_list) - assert len(channel_list) == num_channels - assert len(channel_set) == num_channels - for channel in channel_list: - assert isinstance(channel, grpc.aio.Channel) - - -def test_pooled_transport_pool_creation(): - # channels should be created with the specified options - num_channels = 50 - creds = ga_credentials.AnonymousCredentials() - scopes = ["test1", "test2"] - quota_project_id = "test3" - host = "testhost:8080" - with mock.patch( - "google.api_core.grpc_helpers_async.create_channel" - ) as create_channel: - transport = transports.PooledBigtableGrpcAsyncIOTransport( - credentials=creds, - pool_size=num_channels, - scopes=scopes, - quota_project_id=quota_project_id, - host=host, - ) - assert create_channel.call_count == num_channels - for i in range(num_channels): - kwargs = create_channel.call_args_list[i][1] - assert kwargs["target"] == host - assert kwargs["credentials"] == creds - assert kwargs["scopes"] == scopes - assert kwargs["quota_project_id"] == quota_project_id diff --git a/tests/unit/test_iterators.py b/tests/unit/test_iterators.py deleted file mode 100644 index f7aee2822..000000000 --- a/tests/unit/test_iterators.py +++ /dev/null @@ -1,251 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the 
License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from __future__ import annotations - -import sys -import asyncio -import pytest - -from google.cloud.bigtable._read_rows import _ReadRowsOperation - -# try/except added for compatibility with python < 3.8 -try: - from unittest import mock -except ImportError: # pragma: NO COVER - import mock # type: ignore - - -class MockStream(_ReadRowsOperation): - """ - Mock a _ReadRowsOperation stream for testing - """ - - def __init__(self, items=None, errors=None, operation_timeout=None): - self.transient_errors = errors - self.operation_timeout = operation_timeout - self.next_idx = 0 - if items is None: - items = list(range(10)) - self.items = items - - def __aiter__(self): - return self - - async def __anext__(self): - if self.next_idx >= len(self.items): - raise StopAsyncIteration - item = self.items[self.next_idx] - self.next_idx += 1 - if isinstance(item, Exception): - raise item - return item - - async def aclose(self): - pass - - -class TestReadRowsIterator: - async def mock_stream(self, size=10): - for i in range(size): - yield i - - def _make_one(self, *args, **kwargs): - from google.cloud.bigtable.iterators import ReadRowsIterator - - stream = MockStream(*args, **kwargs) - return ReadRowsIterator(stream) - - def test_ctor(self): - with mock.patch("time.time", return_value=0): - iterator = self._make_one() - assert iterator.last_interaction_time == 0 - assert iterator._idle_timeout_task is None - assert iterator.active is True - - def test___aiter__(self): - iterator = self._make_one() - assert iterator.__aiter__() is iterator - - @pytest.mark.skipif( - sys.version_info < (3, 8), reason="mock coroutine requires python3.8 or higher" - ) - @pytest.mark.asyncio - async def test__start_idle_timer(self): - """Should start timer coroutine""" - iterator = self._make_one() - expected_timeout = 10 - with mock.patch("time.time", return_value=1): - with mock.patch.object(iterator, "_idle_timeout_coroutine") as mock_coro: - await iterator._start_idle_timer(expected_timeout) - assert mock_coro.call_count == 1 - assert mock_coro.call_args[0] == (expected_timeout,) - assert iterator.last_interaction_time == 1 - assert iterator._idle_timeout_task is not None - - @pytest.mark.skipif( - sys.version_info < (3, 8), reason="mock coroutine requires python3.8 or higher" - ) - @pytest.mark.asyncio - async def test__start_idle_timer_duplicate(self): - """Multiple calls should replace task""" - iterator = self._make_one() - with mock.patch.object(iterator, "_idle_timeout_coroutine") as mock_coro: - await iterator._start_idle_timer(1) - first_task = iterator._idle_timeout_task - await iterator._start_idle_timer(2) - second_task = iterator._idle_timeout_task - assert mock_coro.call_count == 2 - - assert first_task is not None - assert first_task != second_task - # old tasks hould be cancelled - with pytest.raises(asyncio.CancelledError): - await first_task - # new task should not be cancelled - await second_task - - @pytest.mark.asyncio - async def test__idle_timeout_coroutine(self): - from google.cloud.bigtable.exceptions import IdleTimeout - - iterator = self._make_one() - await 
iterator._idle_timeout_coroutine(0.05) - await asyncio.sleep(0.1) - assert iterator.active is False - with pytest.raises(IdleTimeout): - await iterator.__anext__() - - @pytest.mark.asyncio - async def test__idle_timeout_coroutine_extensions(self): - """touching the generator should reset the idle timer""" - iterator = self._make_one(items=list(range(100))) - await iterator._start_idle_timer(0.05) - for i in range(10): - # will not expire as long as it is in use - assert iterator.active is True - await iterator.__anext__() - await asyncio.sleep(0.03) - # now let it expire - await asyncio.sleep(0.5) - assert iterator.active is False - - @pytest.mark.asyncio - async def test___anext__(self): - num_rows = 10 - iterator = self._make_one(items=list(range(num_rows))) - for i in range(num_rows): - assert await iterator.__anext__() == i - with pytest.raises(StopAsyncIteration): - await iterator.__anext__() - - @pytest.mark.asyncio - async def test___anext__with_deadline_error(self): - """ - RetryErrors mean a deadline has been hit. - Should be wrapped in a DeadlineExceeded exception - """ - from google.api_core import exceptions as core_exceptions - - items = [1, core_exceptions.RetryError("retry error", None)] - expected_timeout = 99 - iterator = self._make_one(items=items, operation_timeout=expected_timeout) - assert await iterator.__anext__() == 1 - with pytest.raises(core_exceptions.DeadlineExceeded) as exc: - await iterator.__anext__() - assert f"operation_timeout of {expected_timeout:0.1f}s exceeded" in str( - exc.value - ) - assert exc.value.__cause__ is None - - @pytest.mark.asyncio - async def test___anext__with_deadline_error_with_cause(self): - """ - Transient errors should be exposed as an error group - """ - from google.api_core import exceptions as core_exceptions - from google.cloud.bigtable.exceptions import RetryExceptionGroup - - items = [1, core_exceptions.RetryError("retry error", None)] - expected_timeout = 99 - errors = [RuntimeError("error1"), ValueError("error2")] - iterator = self._make_one( - items=items, operation_timeout=expected_timeout, errors=errors - ) - assert await iterator.__anext__() == 1 - with pytest.raises(core_exceptions.DeadlineExceeded) as exc: - await iterator.__anext__() - assert f"operation_timeout of {expected_timeout:0.1f}s exceeded" in str( - exc.value - ) - error_group = exc.value.__cause__ - assert isinstance(error_group, RetryExceptionGroup) - assert len(error_group.exceptions) == 2 - assert error_group.exceptions[0] is errors[0] - assert error_group.exceptions[1] is errors[1] - assert "2 failed attempts" in str(error_group) - - @pytest.mark.asyncio - async def test___anext__with_error(self): - """ - Other errors should be raised as-is - """ - from google.api_core import exceptions as core_exceptions - - items = [1, core_exceptions.InternalServerError("mock error")] - iterator = self._make_one(items=items) - assert await iterator.__anext__() == 1 - with pytest.raises(core_exceptions.InternalServerError) as exc: - await iterator.__anext__() - assert exc.value is items[1] - assert iterator.active is False - # next call should raise same error - with pytest.raises(core_exceptions.InternalServerError) as exc: - await iterator.__anext__() - - @pytest.mark.asyncio - async def test__finish_with_error(self): - iterator = self._make_one() - await iterator._start_idle_timer(10) - timeout_task = iterator._idle_timeout_task - assert await iterator.__anext__() == 0 - assert iterator.active is True - err = ZeroDivisionError("mock error") - await 
iterator._finish_with_error(err) - assert iterator.active is False - assert iterator._error is err - assert iterator._idle_timeout_task is None - with pytest.raises(ZeroDivisionError) as exc: - await iterator.__anext__() - assert exc.value is err - # timeout task should be cancelled - with pytest.raises(asyncio.CancelledError): - await timeout_task - - @pytest.mark.asyncio - async def test_aclose(self): - iterator = self._make_one() - await iterator._start_idle_timer(10) - timeout_task = iterator._idle_timeout_task - assert await iterator.__anext__() == 0 - assert iterator.active is True - await iterator.aclose() - assert iterator.active is False - assert isinstance(iterator._error, StopAsyncIteration) - assert iterator._idle_timeout_task is None - with pytest.raises(StopAsyncIteration) as e: - await iterator.__anext__() - assert "closed" in str(e.value) - # timeout task should be cancelled - with pytest.raises(asyncio.CancelledError): - await timeout_task diff --git a/tests/unit/v2_client/test_app_profile.py b/tests/unit/v2_client/test_app_profile.py index 575f25194..660ee7899 100644 --- a/tests/unit/v2_client/test_app_profile.py +++ b/tests/unit/v2_client/test_app_profile.py @@ -32,19 +32,19 @@ def _make_app_profile(*args, **kwargs): - from google.cloud.bigtable.deprecated.app_profile import AppProfile + from google.cloud.bigtable.app_profile import AppProfile return AppProfile(*args, **kwargs) def _make_client(*args, **kwargs): - from google.cloud.bigtable.deprecated.client import Client + from google.cloud.bigtable.client import Client return Client(*args, **kwargs) def test_app_profile_constructor_defaults(): - from google.cloud.bigtable.deprecated.app_profile import AppProfile + from google.cloud.bigtable.app_profile import AppProfile client = _Client(PROJECT) instance = _Instance(INSTANCE_ID, client) @@ -60,7 +60,7 @@ def test_app_profile_constructor_defaults(): def test_app_profile_constructor_explicit(): - from google.cloud.bigtable.deprecated.enums import RoutingPolicyType + from google.cloud.bigtable.enums import RoutingPolicyType ANY = RoutingPolicyType.ANY DESCRIPTION_1 = "routing policy any" @@ -99,7 +99,7 @@ def test_app_profile_constructor_explicit(): def test_app_profile_constructor_multi_cluster_ids(): - from google.cloud.bigtable.deprecated.enums import RoutingPolicyType + from google.cloud.bigtable.enums import RoutingPolicyType ANY = RoutingPolicyType.ANY DESCRIPTION_1 = "routing policy any" @@ -166,8 +166,8 @@ def test_app_profile___ne__(): def test_app_profile_from_pb_success_w_routing_any(): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated.app_profile import AppProfile - from google.cloud.bigtable.deprecated.enums import RoutingPolicyType + from google.cloud.bigtable.app_profile import AppProfile + from google.cloud.bigtable.enums import RoutingPolicyType client = _Client(PROJECT) instance = _Instance(INSTANCE_ID, client) @@ -195,8 +195,8 @@ def test_app_profile_from_pb_success_w_routing_any(): def test_app_profile_from_pb_success_w_routing_any_multi_cluster_ids(): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated.app_profile import AppProfile - from google.cloud.bigtable.deprecated.enums import RoutingPolicyType + from google.cloud.bigtable.app_profile import AppProfile + from google.cloud.bigtable.enums import RoutingPolicyType client = _Client(PROJECT) instance = _Instance(INSTANCE_ID, client) @@ -226,8 +226,8 @@ def 
test_app_profile_from_pb_success_w_routing_any_multi_cluster_ids(): def test_app_profile_from_pb_success_w_routing_single(): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated.app_profile import AppProfile - from google.cloud.bigtable.deprecated.enums import RoutingPolicyType + from google.cloud.bigtable.app_profile import AppProfile + from google.cloud.bigtable.enums import RoutingPolicyType client = _Client(PROJECT) instance = _Instance(INSTANCE_ID, client) @@ -259,7 +259,7 @@ def test_app_profile_from_pb_success_w_routing_single(): def test_app_profile_from_pb_w_bad_app_profile_name(): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated.app_profile import AppProfile + from google.cloud.bigtable.app_profile import AppProfile bad_app_profile_name = "BAD_NAME" @@ -271,7 +271,7 @@ def test_app_profile_from_pb_w_bad_app_profile_name(): def test_app_profile_from_pb_w_instance_id_mistmatch(): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated.app_profile import AppProfile + from google.cloud.bigtable.app_profile import AppProfile ALT_INSTANCE_ID = "ALT_INSTANCE_ID" client = _Client(PROJECT) @@ -286,7 +286,7 @@ def test_app_profile_from_pb_w_instance_id_mistmatch(): def test_app_profile_from_pb_w_project_mistmatch(): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated.app_profile import AppProfile + from google.cloud.bigtable.app_profile import AppProfile ALT_PROJECT = "ALT_PROJECT" client = _Client(project=ALT_PROJECT) @@ -304,7 +304,7 @@ def test_app_profile_reload_w_routing_any(): BigtableInstanceAdminClient, ) from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated.enums import RoutingPolicyType + from google.cloud.bigtable.enums import RoutingPolicyType api = mock.create_autospec(BigtableInstanceAdminClient) credentials = _make_credentials() @@ -400,8 +400,8 @@ def test_app_profile_create_w_routing_any(): from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( BigtableInstanceAdminClient, ) - from google.cloud.bigtable.deprecated.app_profile import AppProfile - from google.cloud.bigtable.deprecated.enums import RoutingPolicyType + from google.cloud.bigtable.app_profile import AppProfile + from google.cloud.bigtable.enums import RoutingPolicyType credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials, admin=True) @@ -461,8 +461,8 @@ def test_app_profile_create_w_routing_single(): from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( BigtableInstanceAdminClient, ) - from google.cloud.bigtable.deprecated.app_profile import AppProfile - from google.cloud.bigtable.deprecated.enums import RoutingPolicyType + from google.cloud.bigtable.app_profile import AppProfile + from google.cloud.bigtable.enums import RoutingPolicyType credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials, admin=True) @@ -533,7 +533,7 @@ def test_app_profile_update_w_routing_any(): from google.cloud.bigtable_admin_v2.types import ( bigtable_instance_admin as messages_v2_pb2, ) - from google.cloud.bigtable.deprecated.enums import RoutingPolicyType + from google.cloud.bigtable.enums import RoutingPolicyType from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( 
BigtableInstanceAdminClient, ) @@ -608,7 +608,7 @@ def test_app_profile_update_w_routing_any_multi_cluster_ids(): from google.cloud.bigtable_admin_v2.types import ( bigtable_instance_admin as messages_v2_pb2, ) - from google.cloud.bigtable.deprecated.enums import RoutingPolicyType + from google.cloud.bigtable.enums import RoutingPolicyType from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( BigtableInstanceAdminClient, ) @@ -684,7 +684,7 @@ def test_app_profile_update_w_routing_single(): from google.cloud.bigtable_admin_v2.types import ( bigtable_instance_admin as messages_v2_pb2, ) - from google.cloud.bigtable.deprecated.enums import RoutingPolicyType + from google.cloud.bigtable.enums import RoutingPolicyType from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( BigtableInstanceAdminClient, ) diff --git a/tests/unit/v2_client/test_backup.py b/tests/unit/v2_client/test_backup.py index 34cc8823a..9882ca339 100644 --- a/tests/unit/v2_client/test_backup.py +++ b/tests/unit/v2_client/test_backup.py @@ -48,7 +48,7 @@ def _make_table_admin_client(): def _make_backup(*args, **kwargs): - from google.cloud.bigtable.deprecated.backup import Backup + from google.cloud.bigtable.backup import Backup return Backup(*args, **kwargs) @@ -102,7 +102,7 @@ def test_backup_constructor_explicit(): def test_backup_from_pb_w_project_mismatch(): from google.cloud.bigtable_admin_v2.types import table - from google.cloud.bigtable.deprecated.backup import Backup + from google.cloud.bigtable.backup import Backup alt_project_id = "alt-project-id" client = _Client(project=alt_project_id) @@ -115,7 +115,7 @@ def test_backup_from_pb_w_project_mismatch(): def test_backup_from_pb_w_instance_mismatch(): from google.cloud.bigtable_admin_v2.types import table - from google.cloud.bigtable.deprecated.backup import Backup + from google.cloud.bigtable.backup import Backup alt_instance = "/projects/%s/instances/alt-instance" % PROJECT_ID client = _Client() @@ -128,7 +128,7 @@ def test_backup_from_pb_w_instance_mismatch(): def test_backup_from_pb_w_bad_name(): from google.cloud.bigtable_admin_v2.types import table - from google.cloud.bigtable.deprecated.backup import Backup + from google.cloud.bigtable.backup import Backup client = _Client() instance = _Instance(INSTANCE_NAME, client) @@ -139,10 +139,10 @@ def test_backup_from_pb_w_bad_name(): def test_backup_from_pb_success(): - from google.cloud.bigtable.deprecated.encryption_info import EncryptionInfo - from google.cloud.bigtable.deprecated.error import Status + from google.cloud.bigtable.encryption_info import EncryptionInfo + from google.cloud.bigtable.error import Status from google.cloud.bigtable_admin_v2.types import table - from google.cloud.bigtable.deprecated.backup import Backup + from google.cloud.bigtable.backup import Backup from google.cloud._helpers import _datetime_to_pb_timestamp from google.rpc.code_pb2 import Code @@ -190,7 +190,7 @@ def test_backup_from_pb_success(): def test_backup_name(): - from google.cloud.bigtable.deprecated.client import Client + from google.cloud.bigtable.client import Client from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( BigtableInstanceAdminClient, ) @@ -225,7 +225,7 @@ def test_backup_parent_none(): def test_backup_parent_w_cluster(): - from google.cloud.bigtable.deprecated.client import Client + from google.cloud.bigtable.client import Client from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( BigtableInstanceAdminClient, ) @@ 
-242,7 +242,7 @@ def test_backup_parent_w_cluster(): def test_backup_source_table_none(): - from google.cloud.bigtable.deprecated.client import Client + from google.cloud.bigtable.client import Client from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( BigtableInstanceAdminClient, ) @@ -258,7 +258,7 @@ def test_backup_source_table_none(): def test_backup_source_table_valid(): - from google.cloud.bigtable.deprecated.client import Client + from google.cloud.bigtable.client import Client from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( BigtableInstanceAdminClient, ) @@ -473,7 +473,7 @@ def test_backup_create_w_expire_time_not_set(): def test_backup_create_success(): from google.cloud._helpers import _datetime_to_pb_timestamp from google.cloud.bigtable_admin_v2.types import table - from google.cloud.bigtable.deprecated import Client + from google.cloud.bigtable import Client op_future = object() credentials = _make_credentials() @@ -806,12 +806,12 @@ def test_backup_restore_to_another_instance(): def test_backup_get_iam_policy(): - from google.cloud.bigtable.deprecated.client import Client + from google.cloud.bigtable.client import Client from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( BigtableTableAdminClient, ) from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.deprecated.policy import BIGTABLE_ADMIN_ROLE + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE credentials = _make_credentials() client = Client(project=PROJECT_ID, credentials=credentials, admin=True) @@ -842,13 +842,13 @@ def test_backup_get_iam_policy(): def test_backup_set_iam_policy(): - from google.cloud.bigtable.deprecated.client import Client + from google.cloud.bigtable.client import Client from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( BigtableTableAdminClient, ) from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.deprecated.policy import Policy - from google.cloud.bigtable.deprecated.policy import BIGTABLE_ADMIN_ROLE + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE credentials = _make_credentials() client = Client(project=PROJECT_ID, credentials=credentials, admin=True) @@ -887,7 +887,7 @@ def test_backup_set_iam_policy(): def test_backup_test_iam_permissions(): - from google.cloud.bigtable.deprecated.client import Client + from google.cloud.bigtable.client import Client from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( BigtableTableAdminClient, ) diff --git a/tests/unit/v2_client/test_batcher.py b/tests/unit/v2_client/test_batcher.py index 0793ed480..ab511e030 100644 --- a/tests/unit/v2_client/test_batcher.py +++ b/tests/unit/v2_client/test_batcher.py @@ -14,122 +14,139 @@ import mock +import time + import pytest -from google.cloud.bigtable.deprecated.row import DirectRow +from google.cloud.bigtable.row import DirectRow +from google.cloud.bigtable.batcher import ( + _FlowControl, + MutationsBatcher, + MutationsBatchError, +) TABLE_ID = "table-id" TABLE_NAME = "/tables/" + TABLE_ID -def _make_mutation_batcher(table, **kw): - from google.cloud.bigtable.deprecated.batcher import MutationsBatcher - - return MutationsBatcher(table, **kw) +def test_mutation_batcher_constructor(): + table = _Table(TABLE_NAME) + with MutationsBatcher(table) as mutation_batcher: + assert table is mutation_batcher.table -def test_mutation_batcher_constructor(): +def test_mutation_batcher_w_user_callback(): table 
= _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table) - assert table is mutation_batcher.table + def callback_fn(response): + callback_fn.count = len(response) + + with MutationsBatcher( + table, flush_count=1, batch_completed_callback=callback_fn + ) as mutation_batcher: + rows = [ + DirectRow(row_key=b"row_key"), + DirectRow(row_key=b"row_key_2"), + DirectRow(row_key=b"row_key_3"), + DirectRow(row_key=b"row_key_4"), + ] + + mutation_batcher.mutate_rows(rows) + + assert callback_fn.count == 4 def test_mutation_batcher_mutate_row(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) + with MutationsBatcher(table=table) as mutation_batcher: - rows = [ - DirectRow(row_key=b"row_key"), - DirectRow(row_key=b"row_key_2"), - DirectRow(row_key=b"row_key_3"), - DirectRow(row_key=b"row_key_4"), - ] + rows = [ + DirectRow(row_key=b"row_key"), + DirectRow(row_key=b"row_key_2"), + DirectRow(row_key=b"row_key_3"), + DirectRow(row_key=b"row_key_4"), + ] - mutation_batcher.mutate_rows(rows) - mutation_batcher.flush() + mutation_batcher.mutate_rows(rows) assert table.mutation_calls == 1 def test_mutation_batcher_mutate(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) - - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) - row.set_cell("cf1", b"c4", 4) + with MutationsBatcher(table=table) as mutation_batcher: - mutation_batcher.mutate(row) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) + row.set_cell("cf1", b"c4", 4) - mutation_batcher.flush() + mutation_batcher.mutate(row) assert table.mutation_calls == 1 def test_mutation_batcher_flush_w_no_rows(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) - mutation_batcher.flush() + with MutationsBatcher(table=table) as mutation_batcher: + mutation_batcher.flush() assert table.mutation_calls == 0 def test_mutation_batcher_mutate_w_max_flush_count(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table, flush_count=3) + with MutationsBatcher(table=table, flush_count=3) as mutation_batcher: - row_1 = DirectRow(row_key=b"row_key_1") - row_2 = DirectRow(row_key=b"row_key_2") - row_3 = DirectRow(row_key=b"row_key_3") + row_1 = DirectRow(row_key=b"row_key_1") + row_2 = DirectRow(row_key=b"row_key_2") + row_3 = DirectRow(row_key=b"row_key_3") - mutation_batcher.mutate(row_1) - mutation_batcher.mutate(row_2) - mutation_batcher.mutate(row_3) + mutation_batcher.mutate(row_1) + mutation_batcher.mutate(row_2) + mutation_batcher.mutate(row_3) assert table.mutation_calls == 1 -@mock.patch("google.cloud.bigtable.deprecated.batcher.MAX_MUTATIONS", new=3) -def test_mutation_batcher_mutate_with_max_mutations_failure(): - from google.cloud.bigtable.deprecated.batcher import MaxMutationsError - +@mock.patch("google.cloud.bigtable.batcher.MAX_OUTSTANDING_ELEMENTS", new=3) +def test_mutation_batcher_mutate_w_max_mutations(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) + with MutationsBatcher(table=table) as mutation_batcher: - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) - row.set_cell("cf1", b"c4", 4) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) - with 
pytest.raises(MaxMutationsError): mutation_batcher.mutate(row) + assert table.mutation_calls == 1 + -@mock.patch("google.cloud.bigtable.deprecated.batcher.MAX_MUTATIONS", new=3) -def test_mutation_batcher_mutate_w_max_mutations(): +def test_mutation_batcher_mutate_w_max_row_bytes(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) + with MutationsBatcher( + table=table, max_row_bytes=3 * 1024 * 1024 + ) as mutation_batcher: - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) + number_of_bytes = 1 * 1024 * 1024 + max_value = b"1" * number_of_bytes - mutation_batcher.mutate(row) - mutation_batcher.flush() + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", max_value) + row.set_cell("cf1", b"c2", max_value) + row.set_cell("cf1", b"c3", max_value) + + mutation_batcher.mutate(row) assert table.mutation_calls == 1 -def test_mutation_batcher_mutate_w_max_row_bytes(): +def test_mutations_batcher_flushed_when_closed(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher( - table=table, max_row_bytes=3 * 1024 * 1024 - ) + mutation_batcher = MutationsBatcher(table=table, max_row_bytes=3 * 1024 * 1024) number_of_bytes = 1 * 1024 * 1024 max_value = b"1" * number_of_bytes @@ -137,13 +154,107 @@ def test_mutation_batcher_mutate_w_max_row_bytes(): row = DirectRow(row_key=b"row_key") row.set_cell("cf1", b"c1", max_value) row.set_cell("cf1", b"c2", max_value) - row.set_cell("cf1", b"c3", max_value) mutation_batcher.mutate(row) + assert table.mutation_calls == 0 + + mutation_batcher.close() assert table.mutation_calls == 1 +def test_mutations_batcher_context_manager_flushed_when_closed(): + table = _Table(TABLE_NAME) + with MutationsBatcher( + table=table, max_row_bytes=3 * 1024 * 1024 + ) as mutation_batcher: + + number_of_bytes = 1 * 1024 * 1024 + max_value = b"1" * number_of_bytes + + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", max_value) + row.set_cell("cf1", b"c2", max_value) + + mutation_batcher.mutate(row) + + assert table.mutation_calls == 1 + + +@mock.patch("google.cloud.bigtable.batcher.MutationsBatcher.flush") +def test_mutations_batcher_flush_interval(mocked_flush): + table = _Table(TABLE_NAME) + flush_interval = 0.5 + mutation_batcher = MutationsBatcher(table=table, flush_interval=flush_interval) + + assert mutation_batcher._timer.interval == flush_interval + mocked_flush.assert_not_called() + + time.sleep(0.4) + mocked_flush.assert_not_called() + + time.sleep(0.1) + mocked_flush.assert_called_once_with() + + mutation_batcher.close() + + +def test_mutations_batcher_response_with_error_codes(): + from google.rpc.status_pb2 import Status + + mocked_response = [Status(code=1), Status(code=5)] + + table = mock.Mock() + mutation_batcher = MutationsBatcher(table=table) + + row1 = DirectRow(row_key=b"row_key") + row2 = DirectRow(row_key=b"row_key") + table.mutate_rows.return_value = mocked_response + + mutation_batcher.mutate_rows([row1, row2]) + with pytest.raises(MutationsBatchError) as exc: + mutation_batcher.close() + assert exc.value.message == "Errors in batch mutations." 
+ assert len(exc.value.exc) == 2 + + assert exc.value.exc[0].message == mocked_response[0].message + assert exc.value.exc[1].message == mocked_response[1].message + + +def test_flow_control_event_is_set_when_not_blocked(): + flow_control = _FlowControl() + + flow_control.set_flow_control_status() + assert flow_control.event.is_set() + + +def test_flow_control_event_is_not_set_when_blocked(): + flow_control = _FlowControl() + + flow_control.inflight_mutations = flow_control.max_mutations + flow_control.inflight_size = flow_control.max_mutation_bytes + + flow_control.set_flow_control_status() + assert not flow_control.event.is_set() + + +@mock.patch("concurrent.futures.ThreadPoolExecutor.submit") +def test_flush_async_batch_count(mocked_executor_submit): + table = _Table(TABLE_NAME) + mutation_batcher = MutationsBatcher(table=table, flush_count=2) + + number_of_bytes = 1 * 1024 * 1024 + max_value = b"1" * number_of_bytes + for index in range(5): + row = DirectRow(row_key=f"row_key_{index}") + row.set_cell("cf1", b"c1", max_value) + mutation_batcher.mutate(row) + mutation_batcher._flush_async() + + # 3 batches submitted. 2 batches of 2 items, and the last one a single item batch. + assert mocked_executor_submit.call_count == 3 + + class _Instance(object): def __init__(self, client=None): self._client = client @@ -156,5 +267,8 @@ def __init__(self, name, client=None): self.mutation_calls = 0 def mutate_rows(self, rows): + from google.rpc.status_pb2 import Status + self.mutation_calls += 1 - return rows + + return [Status(code=0) for _ in rows] diff --git a/tests/unit/v2_client/test_client.py b/tests/unit/v2_client/test_client.py index 9deac6a25..5944c58a3 100644 --- a/tests/unit/v2_client/test_client.py +++ b/tests/unit/v2_client/test_client.py @@ -25,7 +25,7 @@ def _invoke_client_factory(client_class, **kw): - from google.cloud.bigtable.deprecated.client import _create_gapic_client + from google.cloud.bigtable.client import _create_gapic_client return _create_gapic_client(client_class, **kw) @@ -101,27 +101,23 @@ def __init__(self, credentials, emulator_host=None, emulator_channel=None): def _make_client(*args, **kwargs): - from google.cloud.bigtable.deprecated.client import Client + from google.cloud.bigtable.client import Client return Client(*args, **kwargs) @mock.patch("os.environ", {}) def test_client_constructor_defaults(): - import warnings from google.api_core import client_info - from google.cloud.bigtable.deprecated import __version__ - from google.cloud.bigtable.deprecated.client import DATA_SCOPE + from google.cloud.bigtable import __version__ + from google.cloud.bigtable.client import DATA_SCOPE credentials = _make_credentials() - with warnings.catch_warnings(record=True) as warned: - with mock.patch("google.auth.default") as mocked: - mocked.return_value = credentials, PROJECT - client = _make_client() + with mock.patch("google.auth.default") as mocked: + mocked.return_value = credentials, PROJECT + client = _make_client() - # warn about client deprecation - assert len(warned) == 1 assert client.project == PROJECT assert client._credentials is credentials.with_scopes.return_value assert not client._read_only @@ -135,8 +131,8 @@ def test_client_constructor_defaults(): def test_client_constructor_explicit(): import warnings - from google.cloud.bigtable.deprecated.client import ADMIN_SCOPE - from google.cloud.bigtable.deprecated.client import DATA_SCOPE + from google.cloud.bigtable.client import ADMIN_SCOPE + from google.cloud.bigtable.client import DATA_SCOPE credentials = 
_make_credentials() client_info = mock.Mock() @@ -151,8 +147,7 @@ def test_client_constructor_explicit(): channel=mock.sentinel.channel, ) - # deprecationw arnning for channel and Client deprecation - assert len(warned) == 2 + assert len(warned) == 1 assert client.project == PROJECT assert client._credentials is credentials.with_scopes.return_value @@ -176,10 +171,8 @@ def test_client_constructor_w_both_admin_and_read_only(): def test_client_constructor_w_emulator_host(): from google.cloud.environment_vars import BIGTABLE_EMULATOR - from google.cloud.bigtable.deprecated.client import ( - _DEFAULT_BIGTABLE_EMULATOR_CLIENT, - ) - from google.cloud.bigtable.deprecated.client import _GRPC_CHANNEL_OPTIONS + from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT + from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS emulator_host = "localhost:8081" with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}): @@ -202,7 +195,7 @@ def test_client_constructor_w_emulator_host(): def test_client_constructor_w_emulator_host_w_project(): from google.cloud.environment_vars import BIGTABLE_EMULATOR - from google.cloud.bigtable.deprecated.client import _GRPC_CHANNEL_OPTIONS + from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS emulator_host = "localhost:8081" with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}): @@ -223,10 +216,8 @@ def test_client_constructor_w_emulator_host_w_project(): def test_client_constructor_w_emulator_host_w_credentials(): from google.cloud.environment_vars import BIGTABLE_EMULATOR - from google.cloud.bigtable.deprecated.client import ( - _DEFAULT_BIGTABLE_EMULATOR_CLIENT, - ) - from google.cloud.bigtable.deprecated.client import _GRPC_CHANNEL_OPTIONS + from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT + from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS emulator_host = "localhost:8081" credentials = _make_credentials() @@ -247,15 +238,15 @@ def test_client_constructor_w_emulator_host_w_credentials(): def test_client__get_scopes_default(): - from google.cloud.bigtable.deprecated.client import DATA_SCOPE + from google.cloud.bigtable.client import DATA_SCOPE client = _make_client(project=PROJECT, credentials=_make_credentials()) assert client._get_scopes() == (DATA_SCOPE,) def test_client__get_scopes_w_admin(): - from google.cloud.bigtable.deprecated.client import ADMIN_SCOPE - from google.cloud.bigtable.deprecated.client import DATA_SCOPE + from google.cloud.bigtable.client import ADMIN_SCOPE + from google.cloud.bigtable.client import DATA_SCOPE client = _make_client(project=PROJECT, credentials=_make_credentials(), admin=True) expected_scopes = (DATA_SCOPE, ADMIN_SCOPE) @@ -263,7 +254,7 @@ def test_client__get_scopes_w_admin(): def test_client__get_scopes_w_read_only(): - from google.cloud.bigtable.deprecated.client import READ_ONLY_SCOPE + from google.cloud.bigtable.client import READ_ONLY_SCOPE client = _make_client( project=PROJECT, credentials=_make_credentials(), read_only=True @@ -353,7 +344,7 @@ def test_client__local_composite_credentials(): def _create_gapic_client_channel_helper(endpoint=None, emulator_host=None): - from google.cloud.bigtable.deprecated.client import _GRPC_CHANNEL_OPTIONS + from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS client_class = mock.Mock(spec=["DEFAULT_ENDPOINT"]) credentials = _make_credentials() @@ -627,7 +618,7 @@ def test_client_instance_admin_client_initialized(): def test_client_instance_factory_defaults(): - from 
google.cloud.bigtable.deprecated.instance import Instance + from google.cloud.bigtable.instance import Instance credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials) @@ -643,8 +634,8 @@ def test_client_instance_factory_defaults(): def test_client_instance_factory_non_defaults(): - from google.cloud.bigtable.deprecated.instance import Instance - from google.cloud.bigtable.deprecated import enums + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable import enums instance_type = enums.Instance.Type.DEVELOPMENT labels = {"foo": "bar"} @@ -674,7 +665,7 @@ def test_client_list_instances(): from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( BigtableInstanceAdminClient, ) - from google.cloud.bigtable.deprecated.instance import Instance + from google.cloud.bigtable.instance import Instance FAILED_LOCATION = "FAILED" INSTANCE_ID1 = "instance-id1" @@ -726,7 +717,7 @@ def test_client_list_clusters(): bigtable_instance_admin as messages_v2_pb2, ) from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated.instance import Cluster + from google.cloud.bigtable.instance import Cluster instance_api = mock.create_autospec(BigtableInstanceAdminClient) diff --git a/tests/unit/v2_client/test_cluster.py b/tests/unit/v2_client/test_cluster.py index e667c2af4..cb0312b0c 100644 --- a/tests/unit/v2_client/test_cluster.py +++ b/tests/unit/v2_client/test_cluster.py @@ -42,13 +42,13 @@ def _make_cluster(*args, **kwargs): - from google.cloud.bigtable.deprecated.cluster import Cluster + from google.cloud.bigtable.cluster import Cluster return Cluster(*args, **kwargs) def _make_client(*args, **kwargs): - from google.cloud.bigtable.deprecated.client import Client + from google.cloud.bigtable.client import Client return Client(*args, **kwargs) @@ -72,8 +72,8 @@ def test_cluster_constructor_defaults(): def test_cluster_constructor_explicit(): - from google.cloud.bigtable.deprecated.enums import StorageType - from google.cloud.bigtable.deprecated.enums import Cluster + from google.cloud.bigtable.enums import StorageType + from google.cloud.bigtable.enums import Cluster STATE = Cluster.State.READY STORAGE_TYPE_SSD = StorageType.SSD @@ -126,8 +126,8 @@ def test_cluster_kms_key_name_setter(): def test_cluster_from_pb_success(): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated.cluster import Cluster - from google.cloud.bigtable.deprecated import enums + from google.cloud.bigtable.cluster import Cluster + from google.cloud.bigtable import enums client = _Client(PROJECT) instance = _Instance(INSTANCE_ID, client) @@ -162,7 +162,7 @@ def test_cluster_from_pb_success(): def test_cluster_from_pb_w_bad_cluster_name(): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated.cluster import Cluster + from google.cloud.bigtable.cluster import Cluster bad_cluster_name = "BAD_NAME" @@ -174,7 +174,7 @@ def test_cluster_from_pb_w_bad_cluster_name(): def test_cluster_from_pb_w_instance_id_mistmatch(): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated.cluster import Cluster + from google.cloud.bigtable.cluster import Cluster ALT_INSTANCE_ID = "ALT_INSTANCE_ID" client = _Client(PROJECT) @@ -189,7 +189,7 @@ def test_cluster_from_pb_w_instance_id_mistmatch(): def test_cluster_from_pb_w_project_mistmatch(): from 
google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated.cluster import Cluster + from google.cloud.bigtable.cluster import Cluster ALT_PROJECT = "ALT_PROJECT" client = _Client(project=ALT_PROJECT) @@ -204,8 +204,8 @@ def test_cluster_from_pb_w_project_mistmatch(): def test_cluster_from_pb_w_autoscaling(): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated.cluster import Cluster - from google.cloud.bigtable.deprecated import enums + from google.cloud.bigtable.cluster import Cluster + from google.cloud.bigtable import enums client = _Client(PROJECT) instance = _Instance(INSTANCE_ID, client) @@ -292,8 +292,8 @@ def _make_instance_admin_client(): def test_cluster_reload(): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated.enums import StorageType - from google.cloud.bigtable.deprecated.enums import Cluster + from google.cloud.bigtable.enums import StorageType + from google.cloud.bigtable.enums import Cluster credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials, admin=True) @@ -349,7 +349,7 @@ def test_cluster_reload(): def test_cluster_exists_hit(): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated.instance import Instance + from google.cloud.bigtable.instance import Instance credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials, admin=True) @@ -371,7 +371,7 @@ def test_cluster_exists_hit(): def test_cluster_exists_miss(): - from google.cloud.bigtable.deprecated.instance import Instance + from google.cloud.bigtable.instance import Instance from google.api_core import exceptions credentials = _make_credentials() @@ -390,7 +390,7 @@ def test_cluster_exists_miss(): def test_cluster_exists_w_error(): - from google.cloud.bigtable.deprecated.instance import Instance + from google.cloud.bigtable.instance import Instance from google.api_core import exceptions credentials = _make_credentials() @@ -416,9 +416,9 @@ def test_cluster_create(): bigtable_instance_admin as messages_v2_pb2, ) from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable.deprecated.instance import Instance + from google.cloud.bigtable.instance import Instance from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2 - from google.cloud.bigtable.deprecated.enums import StorageType + from google.cloud.bigtable.enums import StorageType NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) @@ -471,9 +471,9 @@ def test_cluster_create_w_cmek(): bigtable_instance_admin as messages_v2_pb2, ) from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable.deprecated.instance import Instance + from google.cloud.bigtable.instance import Instance from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2 - from google.cloud.bigtable.deprecated.enums import StorageType + from google.cloud.bigtable.enums import StorageType NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) @@ -531,9 +531,9 @@ def test_cluster_create_w_autoscaling(): bigtable_instance_admin as messages_v2_pb2, ) from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable.deprecated.instance import Instance + from google.cloud.bigtable.instance import Instance from 
google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2 - from google.cloud.bigtable.deprecated.enums import StorageType + from google.cloud.bigtable.enums import StorageType NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) @@ -600,7 +600,7 @@ def test_cluster_update(): from google.cloud.bigtable_admin_v2.types import ( bigtable_instance_admin as messages_v2_pb2, ) - from google.cloud.bigtable.deprecated.enums import StorageType + from google.cloud.bigtable.enums import StorageType NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) @@ -667,7 +667,7 @@ def test_cluster_update_w_autoscaling(): from google.cloud.bigtable_admin_v2.types import ( bigtable_instance_admin as messages_v2_pb2, ) - from google.cloud.bigtable.deprecated.enums import StorageType + from google.cloud.bigtable.enums import StorageType NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) @@ -726,7 +726,7 @@ def test_cluster_update_w_partial_autoscaling_config(): from google.cloud.bigtable_admin_v2.types import ( bigtable_instance_admin as messages_v2_pb2, ) - from google.cloud.bigtable.deprecated.enums import StorageType + from google.cloud.bigtable.enums import StorageType NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) @@ -811,7 +811,7 @@ def test_cluster_update_w_both_manual_and_autoscaling(): from google.cloud.bigtable_admin_v2.types import ( bigtable_instance_admin as messages_v2_pb2, ) - from google.cloud.bigtable.deprecated.enums import StorageType + from google.cloud.bigtable.enums import StorageType NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) @@ -871,8 +871,8 @@ def test_cluster_disable_autoscaling(): bigtable_instance_admin as messages_v2_pb2, ) from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable.deprecated.instance import Instance - from google.cloud.bigtable.deprecated.enums import StorageType + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable.enums import StorageType NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) @@ -928,8 +928,8 @@ def test_cluster_disable_autoscaling(): def test_create_cluster_with_both_manual_and_autoscaling(): - from google.cloud.bigtable.deprecated.instance import Instance - from google.cloud.bigtable.deprecated.enums import StorageType + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable.enums import StorageType credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials, admin=True) @@ -956,8 +956,8 @@ def test_create_cluster_with_both_manual_and_autoscaling(): def test_create_cluster_with_partial_autoscaling_config(): - from google.cloud.bigtable.deprecated.instance import Instance - from google.cloud.bigtable.deprecated.enums import StorageType + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable.enums import StorageType credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials, admin=True) @@ -997,8 +997,8 @@ def test_create_cluster_with_partial_autoscaling_config(): def test_create_cluster_with_no_scaling_config(): - from google.cloud.bigtable.deprecated.instance import Instance - from google.cloud.bigtable.deprecated.enums import StorageType + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable.enums import StorageType credentials = _make_credentials() client = _make_client(project=PROJECT, 
credentials=credentials, admin=True) diff --git a/tests/unit/v2_client/test_column_family.py b/tests/unit/v2_client/test_column_family.py index d16d2b20c..b164b2fc1 100644 --- a/tests/unit/v2_client/test_column_family.py +++ b/tests/unit/v2_client/test_column_family.py @@ -19,7 +19,7 @@ def _make_max_versions_gc_rule(*args, **kwargs): - from google.cloud.bigtable.deprecated.column_family import MaxVersionsGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule return MaxVersionsGCRule(*args, **kwargs) @@ -51,7 +51,7 @@ def test_max_versions_gc_rule_to_pb(): def _make_max_age_gc_rule(*args, **kwargs): - from google.cloud.bigtable.deprecated.column_family import MaxAgeGCRule + from google.cloud.bigtable.column_family import MaxAgeGCRule return MaxAgeGCRule(*args, **kwargs) @@ -89,7 +89,7 @@ def test_max_age_gc_rule_to_pb(): def _make_gc_rule_union(*args, **kwargs): - from google.cloud.bigtable.deprecated.column_family import GCRuleUnion + from google.cloud.bigtable.column_family import GCRuleUnion return GCRuleUnion(*args, **kwargs) @@ -124,8 +124,8 @@ def test_gc_rule_union___ne__same_value(): def test_gc_rule_union_to_pb(): import datetime from google.protobuf import duration_pb2 - from google.cloud.bigtable.deprecated.column_family import MaxAgeGCRule - from google.cloud.bigtable.deprecated.column_family import MaxVersionsGCRule + from google.cloud.bigtable.column_family import MaxAgeGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule max_num_versions = 42 rule1 = MaxVersionsGCRule(max_num_versions) @@ -145,8 +145,8 @@ def test_gc_rule_union_to_pb(): def test_gc_rule_union_to_pb_nested(): import datetime from google.protobuf import duration_pb2 - from google.cloud.bigtable.deprecated.column_family import MaxAgeGCRule - from google.cloud.bigtable.deprecated.column_family import MaxVersionsGCRule + from google.cloud.bigtable.column_family import MaxAgeGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule max_num_versions1 = 42 rule1 = MaxVersionsGCRule(max_num_versions1) @@ -171,7 +171,7 @@ def test_gc_rule_union_to_pb_nested(): def _make_gc_rule_intersection(*args, **kwargs): - from google.cloud.bigtable.deprecated.column_family import GCRuleIntersection + from google.cloud.bigtable.column_family import GCRuleIntersection return GCRuleIntersection(*args, **kwargs) @@ -206,8 +206,8 @@ def test_gc_rule_intersection___ne__same_value(): def test_gc_rule_intersection_to_pb(): import datetime from google.protobuf import duration_pb2 - from google.cloud.bigtable.deprecated.column_family import MaxAgeGCRule - from google.cloud.bigtable.deprecated.column_family import MaxVersionsGCRule + from google.cloud.bigtable.column_family import MaxAgeGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule max_num_versions = 42 rule1 = MaxVersionsGCRule(max_num_versions) @@ -227,8 +227,8 @@ def test_gc_rule_intersection_to_pb(): def test_gc_rule_intersection_to_pb_nested(): import datetime from google.protobuf import duration_pb2 - from google.cloud.bigtable.deprecated.column_family import MaxAgeGCRule - from google.cloud.bigtable.deprecated.column_family import MaxVersionsGCRule + from google.cloud.bigtable.column_family import MaxAgeGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule max_num_versions1 = 42 rule1 = MaxVersionsGCRule(max_num_versions1) @@ -253,13 +253,13 @@ def test_gc_rule_intersection_to_pb_nested(): def _make_column_family(*args, **kwargs): - from 
google.cloud.bigtable.deprecated.column_family import ColumnFamily + from google.cloud.bigtable.column_family import ColumnFamily return ColumnFamily(*args, **kwargs) def _make_client(*args, **kwargs): - from google.cloud.bigtable.deprecated.client import Client + from google.cloud.bigtable.client import Client return Client(*args, **kwargs) @@ -323,7 +323,7 @@ def test_column_family_to_pb_no_rules(): def test_column_family_to_pb_with_rule(): - from google.cloud.bigtable.deprecated.column_family import MaxVersionsGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule gc_rule = MaxVersionsGCRule(1) column_family = _make_column_family("column_family_id", None, gc_rule=gc_rule) @@ -397,7 +397,7 @@ def test_column_family_create(): def test_column_family_create_with_gc_rule(): - from google.cloud.bigtable.deprecated.column_family import MaxVersionsGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule gc_rule = MaxVersionsGCRule(1337) _create_test_helper(gc_rule=gc_rule) @@ -467,7 +467,7 @@ def test_column_family_update(): def test_column_family_update_with_gc_rule(): - from google.cloud.bigtable.deprecated.column_family import MaxVersionsGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule gc_rule = MaxVersionsGCRule(1337) _update_test_helper(gc_rule=gc_rule) @@ -530,15 +530,15 @@ def test_column_family_delete(): def test__gc_rule_from_pb_empty(): - from google.cloud.bigtable.deprecated.column_family import _gc_rule_from_pb + from google.cloud.bigtable.column_family import _gc_rule_from_pb gc_rule_pb = _GcRulePB() assert _gc_rule_from_pb(gc_rule_pb) is None def test__gc_rule_from_pb_max_num_versions(): - from google.cloud.bigtable.deprecated.column_family import _gc_rule_from_pb - from google.cloud.bigtable.deprecated.column_family import MaxVersionsGCRule + from google.cloud.bigtable.column_family import _gc_rule_from_pb + from google.cloud.bigtable.column_family import MaxVersionsGCRule orig_rule = MaxVersionsGCRule(1) gc_rule_pb = orig_rule.to_pb() @@ -549,8 +549,8 @@ def test__gc_rule_from_pb_max_num_versions(): def test__gc_rule_from_pb_max_age(): import datetime - from google.cloud.bigtable.deprecated.column_family import _gc_rule_from_pb - from google.cloud.bigtable.deprecated.column_family import MaxAgeGCRule + from google.cloud.bigtable.column_family import _gc_rule_from_pb + from google.cloud.bigtable.column_family import MaxAgeGCRule orig_rule = MaxAgeGCRule(datetime.timedelta(seconds=1)) gc_rule_pb = orig_rule.to_pb() @@ -561,10 +561,10 @@ def test__gc_rule_from_pb_max_age(): def test__gc_rule_from_pb_union(): import datetime - from google.cloud.bigtable.deprecated.column_family import _gc_rule_from_pb - from google.cloud.bigtable.deprecated.column_family import GCRuleUnion - from google.cloud.bigtable.deprecated.column_family import MaxAgeGCRule - from google.cloud.bigtable.deprecated.column_family import MaxVersionsGCRule + from google.cloud.bigtable.column_family import _gc_rule_from_pb + from google.cloud.bigtable.column_family import GCRuleUnion + from google.cloud.bigtable.column_family import MaxAgeGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule rule1 = MaxVersionsGCRule(1) rule2 = MaxAgeGCRule(datetime.timedelta(seconds=1)) @@ -577,10 +577,10 @@ def test__gc_rule_from_pb_union(): def test__gc_rule_from_pb_intersection(): import datetime - from google.cloud.bigtable.deprecated.column_family import _gc_rule_from_pb - from google.cloud.bigtable.deprecated.column_family import 
GCRuleIntersection - from google.cloud.bigtable.deprecated.column_family import MaxAgeGCRule - from google.cloud.bigtable.deprecated.column_family import MaxVersionsGCRule + from google.cloud.bigtable.column_family import _gc_rule_from_pb + from google.cloud.bigtable.column_family import GCRuleIntersection + from google.cloud.bigtable.column_family import MaxAgeGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule rule1 = MaxVersionsGCRule(1) rule2 = MaxAgeGCRule(datetime.timedelta(seconds=1)) @@ -592,7 +592,7 @@ def test__gc_rule_from_pb_intersection(): def test__gc_rule_from_pb_unknown_field_name(): - from google.cloud.bigtable.deprecated.column_family import _gc_rule_from_pb + from google.cloud.bigtable.column_family import _gc_rule_from_pb class MockProto(object): diff --git a/tests/unit/v2_client/test_encryption_info.py b/tests/unit/v2_client/test_encryption_info.py index 0b6a93e9e..8b92a83ed 100644 --- a/tests/unit/v2_client/test_encryption_info.py +++ b/tests/unit/v2_client/test_encryption_info.py @@ -14,7 +14,7 @@ import mock -from google.cloud.bigtable.deprecated import enums +from google.cloud.bigtable import enums EncryptionType = enums.EncryptionInfo.EncryptionType @@ -30,7 +30,7 @@ def _make_status_pb(code=_STATUS_CODE, message=_STATUS_MESSAGE): def _make_status(code=_STATUS_CODE, message=_STATUS_MESSAGE): - from google.cloud.bigtable.deprecated.error import Status + from google.cloud.bigtable.error import Status status_pb = _make_status_pb(code=code, message=message) return Status(status_pb) @@ -54,7 +54,7 @@ def _make_info_pb( def _make_encryption_info(*args, **kwargs): - from google.cloud.bigtable.deprecated.encryption_info import EncryptionInfo + from google.cloud.bigtable.encryption_info import EncryptionInfo return EncryptionInfo(*args, **kwargs) @@ -70,7 +70,7 @@ def _make_encryption_info_defaults( def test_encryption_info__from_pb(): - from google.cloud.bigtable.deprecated.encryption_info import EncryptionInfo + from google.cloud.bigtable.encryption_info import EncryptionInfo info_pb = _make_info_pb() diff --git a/tests/unit/v2_client/test_error.py b/tests/unit/v2_client/test_error.py index 072a3b3c3..8b148473c 100644 --- a/tests/unit/v2_client/test_error.py +++ b/tests/unit/v2_client/test_error.py @@ -20,7 +20,7 @@ def _make_status_pb(**kwargs): def _make_status(status_pb): - from google.cloud.bigtable.deprecated.error import Status + from google.cloud.bigtable.error import Status return Status(status_pb) diff --git a/tests/unit/v2_client/test_instance.py b/tests/unit/v2_client/test_instance.py index b43e8bb38..c577adca5 100644 --- a/tests/unit/v2_client/test_instance.py +++ b/tests/unit/v2_client/test_instance.py @@ -17,7 +17,7 @@ import pytest from ._testing import _make_credentials -from google.cloud.bigtable.deprecated.cluster import Cluster +from google.cloud.bigtable.cluster import Cluster PROJECT = "project" INSTANCE_ID = "instance-id" @@ -47,7 +47,7 @@ def _make_client(*args, **kwargs): - from google.cloud.bigtable.deprecated.client import Client + from google.cloud.bigtable.client import Client return Client(*args, **kwargs) @@ -61,7 +61,7 @@ def _make_instance_admin_api(): def _make_instance(*args, **kwargs): - from google.cloud.bigtable.deprecated.instance import Instance + from google.cloud.bigtable.instance import Instance return Instance(*args, **kwargs) @@ -79,7 +79,7 @@ def test_instance_constructor_defaults(): def test_instance_constructor_non_default(): - from google.cloud.bigtable.deprecated import enums + from 
google.cloud.bigtable import enums instance_type = enums.Instance.Type.DEVELOPMENT state = enums.Instance.State.READY @@ -104,7 +104,7 @@ def test_instance_constructor_non_default(): def test_instance__update_from_pb_success(): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated import enums + from google.cloud.bigtable import enums instance_type = data_v2_pb2.Instance.Type.PRODUCTION state = enums.Instance.State.READY @@ -129,7 +129,7 @@ def test_instance__update_from_pb_success(): def test_instance__update_from_pb_success_defaults(): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated import enums + from google.cloud.bigtable import enums instance_pb = data_v2_pb2.Instance(display_name=DISPLAY_NAME) @@ -156,8 +156,8 @@ def test_instance__update_from_pb_wo_display_name(): def test_instance_from_pb_success(): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated import enums - from google.cloud.bigtable.deprecated.instance import Instance + from google.cloud.bigtable import enums + from google.cloud.bigtable.instance import Instance credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials, admin=True) @@ -184,7 +184,7 @@ def test_instance_from_pb_success(): def test_instance_from_pb_bad_instance_name(): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated.instance import Instance + from google.cloud.bigtable.instance import Instance instance_name = "INCORRECT_FORMAT" instance_pb = data_v2_pb2.Instance(name=instance_name) @@ -195,7 +195,7 @@ def test_instance_from_pb_bad_instance_name(): def test_instance_from_pb_project_mistmatch(): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated.instance import Instance + from google.cloud.bigtable.instance import Instance ALT_PROJECT = "ALT_PROJECT" credentials = _make_credentials() @@ -304,7 +304,7 @@ def _instance_api_response_for_create(): def test_instance_create(): - from google.cloud.bigtable.deprecated import enums + from google.cloud.bigtable import enums from google.cloud.bigtable_admin_v2.types import Instance from google.cloud.bigtable_admin_v2.types import Cluster import warnings @@ -353,8 +353,8 @@ def test_instance_create(): def test_instance_create_w_clusters(): - from google.cloud.bigtable.deprecated import enums - from google.cloud.bigtable.deprecated.cluster import Cluster + from google.cloud.bigtable import enums + from google.cloud.bigtable.cluster import Cluster from google.cloud.bigtable_admin_v2.types import Cluster as cluster_pb from google.cloud.bigtable_admin_v2.types import Instance as instance_pb @@ -473,7 +473,7 @@ def test_instance_exists_w_error(): def test_instance_reload(): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated import enums + from google.cloud.bigtable import enums DISPLAY_NAME = "hey-hi-hello" credentials = _make_credentials() @@ -527,7 +527,7 @@ def _instance_api_response_for_update(): def test_instance_update(): - from google.cloud.bigtable.deprecated import enums + from google.cloud.bigtable import enums from google.protobuf import field_mask_pb2 from google.cloud.bigtable_admin_v2.types import Instance @@ -603,7 +603,7 @@ def test_instance_delete(): def test_instance_get_iam_policy(): from google.iam.v1 
import policy_pb2 - from google.cloud.bigtable.deprecated.policy import BIGTABLE_ADMIN_ROLE + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials, admin=True) @@ -631,7 +631,7 @@ def test_instance_get_iam_policy(): def test_instance_get_iam_policy_w_requested_policy_version(): from google.iam.v1 import policy_pb2, options_pb2 - from google.cloud.bigtable.deprecated.policy import BIGTABLE_ADMIN_ROLE + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials, admin=True) @@ -665,8 +665,8 @@ def test_instance_get_iam_policy_w_requested_policy_version(): def test_instance_set_iam_policy(): from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.deprecated.policy import Policy - from google.cloud.bigtable.deprecated.policy import BIGTABLE_ADMIN_ROLE + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials, admin=True) @@ -721,7 +721,7 @@ def test_instance_test_iam_permissions(): def test_instance_cluster_factory(): - from google.cloud.bigtable.deprecated import enums + from google.cloud.bigtable import enums CLUSTER_ID = "{}-cluster".format(INSTANCE_ID) LOCATION_ID = "us-central1-c" @@ -749,8 +749,8 @@ def test_instance_list_clusters(): bigtable_instance_admin as messages_v2_pb2, ) from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated.instance import Instance - from google.cloud.bigtable.deprecated.instance import Cluster + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable.instance import Cluster credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials, admin=True) @@ -788,7 +788,7 @@ def test_instance_list_clusters(): def test_instance_table_factory(): - from google.cloud.bigtable.deprecated.table import Table + from google.cloud.bigtable.table import Table app_profile_id = "appProfileId1262094415" instance = _make_instance(INSTANCE_ID, None) @@ -857,7 +857,7 @@ def test_instance_list_tables_failure_name_bad_before(): def test_instance_app_profile_factory(): - from google.cloud.bigtable.deprecated.enums import RoutingPolicyType + from google.cloud.bigtable.enums import RoutingPolicyType instance = _make_instance(INSTANCE_ID, None) @@ -890,7 +890,7 @@ def test_instance_list_app_profiles(): from google.api_core.page_iterator import Iterator from google.api_core.page_iterator import Page from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.deprecated.app_profile import AppProfile + from google.cloud.bigtable.app_profile import AppProfile class _Iterator(Iterator): def __init__(self, pages): diff --git a/tests/unit/v2_client/test_policy.py b/tests/unit/v2_client/test_policy.py index ef3df2d2b..77674517e 100644 --- a/tests/unit/v2_client/test_policy.py +++ b/tests/unit/v2_client/test_policy.py @@ -14,7 +14,7 @@ def _make_policy(*args, **kw): - from google.cloud.bigtable.deprecated.policy import Policy + from google.cloud.bigtable.policy import Policy return Policy(*args, **kw) @@ -48,7 +48,7 @@ def test_policy_ctor_explicit(): def test_policy_bigtable_admins(): - from google.cloud.bigtable.deprecated.policy import BIGTABLE_ADMIN_ROLE + from 
google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE MEMBER = "user:phred@example.com" expected = frozenset([MEMBER]) @@ -58,7 +58,7 @@ def test_policy_bigtable_admins(): def test_policy_bigtable_readers(): - from google.cloud.bigtable.deprecated.policy import BIGTABLE_READER_ROLE + from google.cloud.bigtable.policy import BIGTABLE_READER_ROLE MEMBER = "user:phred@example.com" expected = frozenset([MEMBER]) @@ -68,7 +68,7 @@ def test_policy_bigtable_readers(): def test_policy_bigtable_users(): - from google.cloud.bigtable.deprecated.policy import BIGTABLE_USER_ROLE + from google.cloud.bigtable.policy import BIGTABLE_USER_ROLE MEMBER = "user:phred@example.com" expected = frozenset([MEMBER]) @@ -78,7 +78,7 @@ def test_policy_bigtable_users(): def test_policy_bigtable_viewers(): - from google.cloud.bigtable.deprecated.policy import BIGTABLE_VIEWER_ROLE + from google.cloud.bigtable.policy import BIGTABLE_VIEWER_ROLE MEMBER = "user:phred@example.com" expected = frozenset([MEMBER]) @@ -89,7 +89,7 @@ def test_policy_bigtable_viewers(): def test_policy_from_pb_w_empty(): from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.deprecated.policy import Policy + from google.cloud.bigtable.policy import Policy empty = frozenset() message = policy_pb2.Policy() @@ -106,8 +106,8 @@ def test_policy_from_pb_w_empty(): def test_policy_from_pb_w_non_empty(): from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.deprecated.policy import BIGTABLE_ADMIN_ROLE - from google.cloud.bigtable.deprecated.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + from google.cloud.bigtable.policy import Policy ETAG = b"ETAG" VERSION = 1 @@ -133,8 +133,8 @@ def test_policy_from_pb_w_condition(): import pytest from google.iam.v1 import policy_pb2 from google.api_core.iam import InvalidOperationException, _DICT_ACCESS_MSG - from google.cloud.bigtable.deprecated.policy import BIGTABLE_ADMIN_ROLE - from google.cloud.bigtable.deprecated.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + from google.cloud.bigtable.policy import Policy ETAG = b"ETAG" VERSION = 3 @@ -184,7 +184,7 @@ def test_policy_to_pb_empty(): def test_policy_to_pb_explicit(): from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.deprecated.policy import BIGTABLE_ADMIN_ROLE + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE VERSION = 1 ETAG = b"ETAG" @@ -204,7 +204,7 @@ def test_policy_to_pb_explicit(): def test_policy_to_pb_w_condition(): from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.deprecated.policy import BIGTABLE_ADMIN_ROLE + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE VERSION = 3 ETAG = b"ETAG" @@ -234,7 +234,7 @@ def test_policy_to_pb_w_condition(): def test_policy_from_api_repr_wo_etag(): - from google.cloud.bigtable.deprecated.policy import Policy + from google.cloud.bigtable.policy import Policy VERSION = 1 empty = frozenset() @@ -252,7 +252,7 @@ def test_policy_from_api_repr_wo_etag(): def test_policy_from_api_repr_w_etag(): import base64 - from google.cloud.bigtable.deprecated.policy import Policy + from google.cloud.bigtable.policy import Policy ETAG = b"ETAG" empty = frozenset() diff --git a/tests/unit/v2_client/test_row.py b/tests/unit/v2_client/test_row.py index 4850b18c3..f04802f5c 100644 --- a/tests/unit/v2_client/test_row.py +++ b/tests/unit/v2_client/test_row.py @@ -20,13 +20,13 @@ def _make_client(*args, **kwargs): - from google.cloud.bigtable.deprecated.client import Client + from 
google.cloud.bigtable.client import Client return Client(*args, **kwargs) def _make_row(*args, **kwargs): - from google.cloud.bigtable.deprecated.row import Row + from google.cloud.bigtable.row import Row return Row(*args, **kwargs) @@ -42,7 +42,7 @@ def test_row_table_getter(): def _make__set_delete_row(*args, **kwargs): - from google.cloud.bigtable.deprecated.row import _SetDeleteRow + from google.cloud.bigtable.row import _SetDeleteRow return _SetDeleteRow(*args, **kwargs) @@ -54,7 +54,7 @@ def test__set_detlete_row__get_mutations_virtual(): def _make_direct_row(*args, **kwargs): - from google.cloud.bigtable.deprecated.row import DirectRow + from google.cloud.bigtable.row import DirectRow return DirectRow(*args, **kwargs) @@ -193,7 +193,7 @@ def test_direct_row_delete(): def test_direct_row_delete_cell(): - from google.cloud.bigtable.deprecated.row import DirectRow + from google.cloud.bigtable.row import DirectRow class MockRow(DirectRow): def __init__(self, *args, **kwargs): @@ -237,7 +237,7 @@ def test_direct_row_delete_cells_non_iterable(): def test_direct_row_delete_cells_all_columns(): - from google.cloud.bigtable.deprecated.row import DirectRow + from google.cloud.bigtable.row import DirectRow row_key = b"row_key" column_family_id = "column_family_id" @@ -293,7 +293,7 @@ def test_direct_row_delete_cells_no_time_range(): def test_direct_row_delete_cells_with_time_range(): import datetime from google.cloud._helpers import _EPOCH - from google.cloud.bigtable.deprecated.row_filters import TimestampRange + from google.cloud.bigtable.row_filters import TimestampRange microseconds = 30871000 # Makes sure already milliseconds granularity start = _EPOCH + datetime.timedelta(microseconds=microseconds) @@ -386,7 +386,7 @@ def test_direct_row_commit_with_exception(): def _make_conditional_row(*args, **kwargs): - from google.cloud.bigtable.deprecated.row import ConditionalRow + from google.cloud.bigtable.row import ConditionalRow return ConditionalRow(*args, **kwargs) @@ -417,7 +417,7 @@ def test_conditional_row__get_mutations(): def test_conditional_row_commit(): - from google.cloud.bigtable.deprecated.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import RowSampleFilter from google.cloud.bigtable_v2.services.bigtable import BigtableClient project_id = "project-id" @@ -466,7 +466,7 @@ def test_conditional_row_commit(): def test_conditional_row_commit_too_many_mutations(): from google.cloud._testing import _Monkey - from google.cloud.bigtable.deprecated import row as MUT + from google.cloud.bigtable import row as MUT row_key = b"row_key" table = object() @@ -504,7 +504,7 @@ def test_conditional_row_commit_no_mutations(): def _make_append_row(*args, **kwargs): - from google.cloud.bigtable.deprecated.row import AppendRow + from google.cloud.bigtable.row import AppendRow return AppendRow(*args, **kwargs) @@ -564,7 +564,7 @@ def test_append_row_increment_cell_value(): def test_append_row_commit(): from google.cloud._testing import _Monkey - from google.cloud.bigtable.deprecated import row as MUT + from google.cloud.bigtable import row as MUT from google.cloud.bigtable_v2.services.bigtable import BigtableClient project_id = "project-id" @@ -630,7 +630,7 @@ def test_append_row_commit_no_rules(): def test_append_row_commit_too_many_mutations(): from google.cloud._testing import _Monkey - from google.cloud.bigtable.deprecated import row as MUT + from google.cloud.bigtable import row as MUT row_key = b"row_key" table = object() @@ -644,7 +644,7 @@ def 
test_append_row_commit_too_many_mutations(): def test__parse_rmw_row_response(): from google.cloud._helpers import _datetime_from_microseconds - from google.cloud.bigtable.deprecated.row import _parse_rmw_row_response + from google.cloud.bigtable.row import _parse_rmw_row_response col_fam1 = "col-fam-id" col_fam2 = "col-fam-id2" @@ -700,7 +700,7 @@ def test__parse_rmw_row_response(): def test__parse_family_pb(): from google.cloud._helpers import _datetime_from_microseconds - from google.cloud.bigtable.deprecated.row import _parse_family_pb + from google.cloud.bigtable.row import _parse_family_pb col_fam1 = "col-fam-id" col_name1 = b"col-name1" diff --git a/tests/unit/v2_client/test_row_data.py b/tests/unit/v2_client/test_row_data.py index ee9b065c8..fba69ceba 100644 --- a/tests/unit/v2_client/test_row_data.py +++ b/tests/unit/v2_client/test_row_data.py @@ -27,7 +27,7 @@ def _make_cell(*args, **kwargs): - from google.cloud.bigtable.deprecated.row_data import Cell + from google.cloud.bigtable.row_data import Cell return Cell(*args, **kwargs) @@ -36,7 +36,7 @@ def _cell_from_pb_test_helper(labels=None): import datetime from google.cloud._helpers import _EPOCH from google.cloud.bigtable_v2.types import data as data_v2_pb2 - from google.cloud.bigtable.deprecated.row_data import Cell + from google.cloud.bigtable.row_data import Cell timestamp = _EPOCH + datetime.timedelta(microseconds=TIMESTAMP_MICROS) value = b"value-bytes" @@ -100,7 +100,7 @@ def test_cell___ne__(): def _make_partial_row_data(*args, **kwargs): - from google.cloud.bigtable.deprecated.row_data import PartialRowData + from google.cloud.bigtable.row_data import PartialRowData return PartialRowData(*args, **kwargs) @@ -288,7 +288,7 @@ def trailing_metadata(self): def test__retry_read_rows_exception_miss(): from google.api_core.exceptions import Conflict - from google.cloud.bigtable.deprecated.row_data import _retry_read_rows_exception + from google.cloud.bigtable.row_data import _retry_read_rows_exception exception = Conflict("testing") assert not _retry_read_rows_exception(exception) @@ -296,7 +296,7 @@ def test__retry_read_rows_exception_miss(): def test__retry_read_rows_exception_service_unavailable(): from google.api_core.exceptions import ServiceUnavailable - from google.cloud.bigtable.deprecated.row_data import _retry_read_rows_exception + from google.cloud.bigtable.row_data import _retry_read_rows_exception exception = ServiceUnavailable("testing") assert _retry_read_rows_exception(exception) @@ -304,7 +304,7 @@ def test__retry_read_rows_exception_service_unavailable(): def test__retry_read_rows_exception_deadline_exceeded(): from google.api_core.exceptions import DeadlineExceeded - from google.cloud.bigtable.deprecated.row_data import _retry_read_rows_exception + from google.cloud.bigtable.row_data import _retry_read_rows_exception exception = DeadlineExceeded("testing") assert _retry_read_rows_exception(exception) @@ -312,7 +312,7 @@ def test__retry_read_rows_exception_deadline_exceeded(): def test__retry_read_rows_exception_internal_server_not_retriable(): from google.api_core.exceptions import InternalServerError - from google.cloud.bigtable.deprecated.row_data import ( + from google.cloud.bigtable.row_data import ( _retry_read_rows_exception, RETRYABLE_INTERNAL_ERROR_MESSAGES, ) @@ -325,7 +325,7 @@ def test__retry_read_rows_exception_internal_server_not_retriable(): def test__retry_read_rows_exception_internal_server_retriable(): from google.api_core.exceptions import InternalServerError - from 
google.cloud.bigtable.deprecated.row_data import ( + from google.cloud.bigtable.row_data import ( _retry_read_rows_exception, RETRYABLE_INTERNAL_ERROR_MESSAGES, ) @@ -337,7 +337,7 @@ def test__retry_read_rows_exception_internal_server_retriable(): def test__retry_read_rows_exception_miss_wrapped_in_grpc(): from google.api_core.exceptions import Conflict - from google.cloud.bigtable.deprecated.row_data import _retry_read_rows_exception + from google.cloud.bigtable.row_data import _retry_read_rows_exception wrapped = Conflict("testing") exception = _make_grpc_call_error(wrapped) @@ -346,7 +346,7 @@ def test__retry_read_rows_exception_miss_wrapped_in_grpc(): def test__retry_read_rows_exception_service_unavailable_wrapped_in_grpc(): from google.api_core.exceptions import ServiceUnavailable - from google.cloud.bigtable.deprecated.row_data import _retry_read_rows_exception + from google.cloud.bigtable.row_data import _retry_read_rows_exception wrapped = ServiceUnavailable("testing") exception = _make_grpc_call_error(wrapped) @@ -355,7 +355,7 @@ def test__retry_read_rows_exception_service_unavailable_wrapped_in_grpc(): def test__retry_read_rows_exception_deadline_exceeded_wrapped_in_grpc(): from google.api_core.exceptions import DeadlineExceeded - from google.cloud.bigtable.deprecated.row_data import _retry_read_rows_exception + from google.cloud.bigtable.row_data import _retry_read_rows_exception wrapped = DeadlineExceeded("testing") exception = _make_grpc_call_error(wrapped) @@ -363,7 +363,7 @@ def test__retry_read_rows_exception_deadline_exceeded_wrapped_in_grpc(): def _make_partial_rows_data(*args, **kwargs): - from google.cloud.bigtable.deprecated.row_data import PartialRowsData + from google.cloud.bigtable.row_data import PartialRowsData return PartialRowsData(*args, **kwargs) @@ -373,13 +373,13 @@ def _partial_rows_data_consume_all(yrd): def _make_client(*args, **kwargs): - from google.cloud.bigtable.deprecated.client import Client + from google.cloud.bigtable.client import Client return Client(*args, **kwargs) def test_partial_rows_data_constructor(): - from google.cloud.bigtable.deprecated.row_data import DEFAULT_RETRY_READ_ROWS + from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS client = _Client() client._data_stub = mock.MagicMock() @@ -436,7 +436,7 @@ def fake_read(*args, **kwargs): def test_partial_rows_data_constructor_with_retry(): - from google.cloud.bigtable.deprecated.row_data import DEFAULT_RETRY_READ_ROWS + from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS client = _Client() client._data_stub = mock.MagicMock() @@ -446,7 +446,9 @@ def test_partial_rows_data_constructor_with_retry(): client._data_stub.ReadRows, request, retry ) partial_rows_data.read_method.assert_called_once_with( - request, timeout=DEFAULT_RETRY_READ_ROWS.deadline + 1 + request, + timeout=DEFAULT_RETRY_READ_ROWS.deadline + 1, + retry=DEFAULT_RETRY_READ_ROWS, ) assert partial_rows_data.request is request assert partial_rows_data.rows == {} @@ -644,7 +646,7 @@ def test_partial_rows_data_valid_last_scanned_row_key_on_start(): def test_partial_rows_data_invalid_empty_chunk(): - from google.cloud.bigtable.deprecated.row_data import InvalidChunk + from google.cloud.bigtable.row_data import InvalidChunk from google.cloud.bigtable_v2.services.bigtable import BigtableClient client = _Client() @@ -755,14 +757,14 @@ def test_partial_rows_data_yield_retry_rows_data(): def _make_read_rows_request_manager(*args, **kwargs): - from google.cloud.bigtable.deprecated.row_data import 
_ReadRowsRequestManager + from google.cloud.bigtable.row_data import _ReadRowsRequestManager return _ReadRowsRequestManager(*args, **kwargs) @pytest.fixture(scope="session") def rrrm_data(): - from google.cloud.bigtable.deprecated import row_set + from google.cloud.bigtable import row_set row_range1 = row_set.RowRange(b"row_key21", b"row_key29") row_range2 = row_set.RowRange(b"row_key31", b"row_key39") @@ -851,7 +853,7 @@ def test_RRRM__filter_row_ranges_all_ranges_already_read(rrrm_data): def test_RRRM__filter_row_ranges_all_ranges_already_read_open_closed(): - from google.cloud.bigtable.deprecated import row_set + from google.cloud.bigtable import row_set last_scanned_key = b"row_key54" @@ -895,7 +897,7 @@ def test_RRRM__filter_row_ranges_some_ranges_already_read(rrrm_data): def test_RRRM_build_updated_request(rrrm_data): - from google.cloud.bigtable.deprecated.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import RowSampleFilter from google.cloud.bigtable_v2 import types row_range1 = rrrm_data["row_range1"] @@ -944,7 +946,7 @@ def test_RRRM_build_updated_request_full_table(): def test_RRRM_build_updated_request_no_start_key(): - from google.cloud.bigtable.deprecated.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import RowSampleFilter from google.cloud.bigtable_v2 import types row_filter = RowSampleFilter(0.33) @@ -972,7 +974,7 @@ def test_RRRM_build_updated_request_no_start_key(): def test_RRRM_build_updated_request_no_end_key(): - from google.cloud.bigtable.deprecated.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import RowSampleFilter from google.cloud.bigtable_v2 import types row_filter = RowSampleFilter(0.33) @@ -998,7 +1000,7 @@ def test_RRRM_build_updated_request_no_end_key(): def test_RRRM_build_updated_request_rows(): - from google.cloud.bigtable.deprecated.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import RowSampleFilter row_filter = RowSampleFilter(0.33) last_scanned_key = b"row_key4" @@ -1046,7 +1048,7 @@ def test_RRRM__key_already_read(): def test_RRRM__rows_limit_reached(): - from google.cloud.bigtable.deprecated.row_data import InvalidRetryRequest + from google.cloud.bigtable.row_data import InvalidRetryRequest last_scanned_key = b"row_key14" request = _ReadRowsRequestPB(table_name=TABLE_NAME) @@ -1059,7 +1061,7 @@ def test_RRRM__rows_limit_reached(): def test_RRRM_build_updated_request_last_row_read_raises_invalid_retry_request(): - from google.cloud.bigtable.deprecated.row_data import InvalidRetryRequest + from google.cloud.bigtable.row_data import InvalidRetryRequest last_scanned_key = b"row_key4" request = _ReadRowsRequestPB(table_name=TABLE_NAME) @@ -1073,8 +1075,8 @@ def test_RRRM_build_updated_request_last_row_read_raises_invalid_retry_request() def test_RRRM_build_updated_request_row_ranges_read_raises_invalid_retry_request(): - from google.cloud.bigtable.deprecated.row_data import InvalidRetryRequest - from google.cloud.bigtable.deprecated import row_set + from google.cloud.bigtable.row_data import InvalidRetryRequest + from google.cloud.bigtable import row_set row_range1 = row_set.RowRange(b"row_key21", b"row_key29") @@ -1095,7 +1097,7 @@ def test_RRRM_build_updated_request_row_ranges_read_raises_invalid_retry_request def test_RRRM_build_updated_request_row_ranges_valid(): - from google.cloud.bigtable.deprecated import row_set + from google.cloud.bigtable import row_set row_range1 = row_set.RowRange(b"row_key21", b"row_key29") @@ -1179,7 
+1181,7 @@ def _ReadRowsResponseCellChunkPB(*args, **kw): def _make_cell_pb(value): - from google.cloud.bigtable.deprecated import row_data + from google.cloud.bigtable import row_data return row_data.Cell(value, TIMESTAMP_MICROS) diff --git a/tests/unit/v2_client/test_row_filters.py b/tests/unit/v2_client/test_row_filters.py index dfb16ba16..b312cb942 100644 --- a/tests/unit/v2_client/test_row_filters.py +++ b/tests/unit/v2_client/test_row_filters.py @@ -17,7 +17,7 @@ def test_bool_filter_constructor(): - from google.cloud.bigtable.deprecated.row_filters import _BoolFilter + from google.cloud.bigtable.row_filters import _BoolFilter flag = object() row_filter = _BoolFilter(flag) @@ -25,7 +25,7 @@ def test_bool_filter_constructor(): def test_bool_filter___eq__type_differ(): - from google.cloud.bigtable.deprecated.row_filters import _BoolFilter + from google.cloud.bigtable.row_filters import _BoolFilter flag = object() row_filter1 = _BoolFilter(flag) @@ -34,7 +34,7 @@ def test_bool_filter___eq__type_differ(): def test_bool_filter___eq__same_value(): - from google.cloud.bigtable.deprecated.row_filters import _BoolFilter + from google.cloud.bigtable.row_filters import _BoolFilter flag = object() row_filter1 = _BoolFilter(flag) @@ -43,7 +43,7 @@ def test_bool_filter___eq__same_value(): def test_bool_filter___ne__same_value(): - from google.cloud.bigtable.deprecated.row_filters import _BoolFilter + from google.cloud.bigtable.row_filters import _BoolFilter flag = object() row_filter1 = _BoolFilter(flag) @@ -52,7 +52,7 @@ def test_bool_filter___ne__same_value(): def test_sink_filter_to_pb(): - from google.cloud.bigtable.deprecated.row_filters import SinkFilter + from google.cloud.bigtable.row_filters import SinkFilter flag = True row_filter = SinkFilter(flag) @@ -62,7 +62,7 @@ def test_sink_filter_to_pb(): def test_pass_all_filter_to_pb(): - from google.cloud.bigtable.deprecated.row_filters import PassAllFilter + from google.cloud.bigtable.row_filters import PassAllFilter flag = True row_filter = PassAllFilter(flag) @@ -72,7 +72,7 @@ def test_pass_all_filter_to_pb(): def test_block_all_filter_to_pb(): - from google.cloud.bigtable.deprecated.row_filters import BlockAllFilter + from google.cloud.bigtable.row_filters import BlockAllFilter flag = True row_filter = BlockAllFilter(flag) @@ -82,7 +82,7 @@ def test_block_all_filter_to_pb(): def test_regex_filterconstructor(): - from google.cloud.bigtable.deprecated.row_filters import _RegexFilter + from google.cloud.bigtable.row_filters import _RegexFilter regex = b"abc" row_filter = _RegexFilter(regex) @@ -90,7 +90,7 @@ def test_regex_filterconstructor(): def test_regex_filterconstructor_non_bytes(): - from google.cloud.bigtable.deprecated.row_filters import _RegexFilter + from google.cloud.bigtable.row_filters import _RegexFilter regex = "abc" row_filter = _RegexFilter(regex) @@ -98,7 +98,7 @@ def test_regex_filterconstructor_non_bytes(): def test_regex_filter__eq__type_differ(): - from google.cloud.bigtable.deprecated.row_filters import _RegexFilter + from google.cloud.bigtable.row_filters import _RegexFilter regex = b"def-rgx" row_filter1 = _RegexFilter(regex) @@ -107,7 +107,7 @@ def test_regex_filter__eq__type_differ(): def test_regex_filter__eq__same_value(): - from google.cloud.bigtable.deprecated.row_filters import _RegexFilter + from google.cloud.bigtable.row_filters import _RegexFilter regex = b"trex-regex" row_filter1 = _RegexFilter(regex) @@ -116,7 +116,7 @@ def test_regex_filter__eq__same_value(): def test_regex_filter__ne__same_value(): - 
from google.cloud.bigtable.deprecated.row_filters import _RegexFilter + from google.cloud.bigtable.row_filters import _RegexFilter regex = b"abc" row_filter1 = _RegexFilter(regex) @@ -125,7 +125,7 @@ def test_regex_filter__ne__same_value(): def test_row_key_regex_filter_to_pb(): - from google.cloud.bigtable.deprecated.row_filters import RowKeyRegexFilter + from google.cloud.bigtable.row_filters import RowKeyRegexFilter regex = b"row-key-regex" row_filter = RowKeyRegexFilter(regex) @@ -135,7 +135,7 @@ def test_row_key_regex_filter_to_pb(): def test_row_sample_filter_constructor(): - from google.cloud.bigtable.deprecated.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import RowSampleFilter sample = object() row_filter = RowSampleFilter(sample) @@ -143,7 +143,7 @@ def test_row_sample_filter_constructor(): def test_row_sample_filter___eq__type_differ(): - from google.cloud.bigtable.deprecated.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import RowSampleFilter sample = object() row_filter1 = RowSampleFilter(sample) @@ -152,7 +152,7 @@ def test_row_sample_filter___eq__type_differ(): def test_row_sample_filter___eq__same_value(): - from google.cloud.bigtable.deprecated.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import RowSampleFilter sample = object() row_filter1 = RowSampleFilter(sample) @@ -161,7 +161,7 @@ def test_row_sample_filter___eq__same_value(): def test_row_sample_filter___ne__(): - from google.cloud.bigtable.deprecated.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import RowSampleFilter sample = object() other_sample = object() @@ -171,7 +171,7 @@ def test_row_sample_filter___ne__(): def test_row_sample_filter_to_pb(): - from google.cloud.bigtable.deprecated.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import RowSampleFilter sample = 0.25 row_filter = RowSampleFilter(sample) @@ -181,7 +181,7 @@ def test_row_sample_filter_to_pb(): def test_family_name_regex_filter_to_pb(): - from google.cloud.bigtable.deprecated.row_filters import FamilyNameRegexFilter + from google.cloud.bigtable.row_filters import FamilyNameRegexFilter regex = "family-regex" row_filter = FamilyNameRegexFilter(regex) @@ -191,7 +191,7 @@ def test_family_name_regex_filter_to_pb(): def test_column_qualifier_regext_filter_to_pb(): - from google.cloud.bigtable.deprecated.row_filters import ColumnQualifierRegexFilter + from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter regex = b"column-regex" row_filter = ColumnQualifierRegexFilter(regex) @@ -201,7 +201,7 @@ def test_column_qualifier_regext_filter_to_pb(): def test_timestamp_range_constructor(): - from google.cloud.bigtable.deprecated.row_filters import TimestampRange + from google.cloud.bigtable.row_filters import TimestampRange start = object() end = object() @@ -211,7 +211,7 @@ def test_timestamp_range_constructor(): def test_timestamp_range___eq__(): - from google.cloud.bigtable.deprecated.row_filters import TimestampRange + from google.cloud.bigtable.row_filters import TimestampRange start = object() end = object() @@ -221,7 +221,7 @@ def test_timestamp_range___eq__(): def test_timestamp_range___eq__type_differ(): - from google.cloud.bigtable.deprecated.row_filters import TimestampRange + from google.cloud.bigtable.row_filters import TimestampRange start = object() end = object() @@ -231,7 +231,7 @@ def test_timestamp_range___eq__type_differ(): def test_timestamp_range___ne__same_value(): - from 
google.cloud.bigtable.deprecated.row_filters import TimestampRange + from google.cloud.bigtable.row_filters import TimestampRange start = object() end = object() @@ -243,7 +243,7 @@ def test_timestamp_range___ne__same_value(): def _timestamp_range_to_pb_helper(pb_kwargs, start=None, end=None): import datetime from google.cloud._helpers import _EPOCH - from google.cloud.bigtable.deprecated.row_filters import TimestampRange + from google.cloud.bigtable.row_filters import TimestampRange if start is not None: start = _EPOCH + datetime.timedelta(microseconds=start) @@ -291,7 +291,7 @@ def test_timestamp_range_to_pb_end_only(): def test_timestamp_range_filter_constructor(): - from google.cloud.bigtable.deprecated.row_filters import TimestampRangeFilter + from google.cloud.bigtable.row_filters import TimestampRangeFilter range_ = object() row_filter = TimestampRangeFilter(range_) @@ -299,7 +299,7 @@ def test_timestamp_range_filter_constructor(): def test_timestamp_range_filter___eq__type_differ(): - from google.cloud.bigtable.deprecated.row_filters import TimestampRangeFilter + from google.cloud.bigtable.row_filters import TimestampRangeFilter range_ = object() row_filter1 = TimestampRangeFilter(range_) @@ -308,7 +308,7 @@ def test_timestamp_range_filter___eq__type_differ(): def test_timestamp_range_filter___eq__same_value(): - from google.cloud.bigtable.deprecated.row_filters import TimestampRangeFilter + from google.cloud.bigtable.row_filters import TimestampRangeFilter range_ = object() row_filter1 = TimestampRangeFilter(range_) @@ -317,7 +317,7 @@ def test_timestamp_range_filter___eq__same_value(): def test_timestamp_range_filter___ne__(): - from google.cloud.bigtable.deprecated.row_filters import TimestampRangeFilter + from google.cloud.bigtable.row_filters import TimestampRangeFilter range_ = object() other_range_ = object() @@ -327,8 +327,8 @@ def test_timestamp_range_filter___ne__(): def test_timestamp_range_filter_to_pb(): - from google.cloud.bigtable.deprecated.row_filters import TimestampRangeFilter - from google.cloud.bigtable.deprecated.row_filters import TimestampRange + from google.cloud.bigtable.row_filters import TimestampRangeFilter + from google.cloud.bigtable.row_filters import TimestampRange range_ = TimestampRange() row_filter = TimestampRangeFilter(range_) @@ -338,7 +338,7 @@ def test_timestamp_range_filter_to_pb(): def test_column_range_filter_constructor_defaults(): - from google.cloud.bigtable.deprecated.row_filters import ColumnRangeFilter + from google.cloud.bigtable.row_filters import ColumnRangeFilter column_family_id = object() row_filter = ColumnRangeFilter(column_family_id) @@ -350,7 +350,7 @@ def test_column_range_filter_constructor_defaults(): def test_column_range_filter_constructor_explicit(): - from google.cloud.bigtable.deprecated.row_filters import ColumnRangeFilter + from google.cloud.bigtable.row_filters import ColumnRangeFilter column_family_id = object() start_column = object() @@ -372,7 +372,7 @@ def test_column_range_filter_constructor_explicit(): def test_column_range_filter_constructor_bad_start(): - from google.cloud.bigtable.deprecated.row_filters import ColumnRangeFilter + from google.cloud.bigtable.row_filters import ColumnRangeFilter column_family_id = object() with pytest.raises(ValueError): @@ -380,7 +380,7 @@ def test_column_range_filter_constructor_bad_start(): def test_column_range_filter_constructor_bad_end(): - from google.cloud.bigtable.deprecated.row_filters import ColumnRangeFilter + from google.cloud.bigtable.row_filters import 
ColumnRangeFilter column_family_id = object() with pytest.raises(ValueError): @@ -388,7 +388,7 @@ def test_column_range_filter_constructor_bad_end(): def test_column_range_filter___eq__(): - from google.cloud.bigtable.deprecated.row_filters import ColumnRangeFilter + from google.cloud.bigtable.row_filters import ColumnRangeFilter column_family_id = object() start_column = object() @@ -413,7 +413,7 @@ def test_column_range_filter___eq__(): def test_column_range_filter___eq__type_differ(): - from google.cloud.bigtable.deprecated.row_filters import ColumnRangeFilter + from google.cloud.bigtable.row_filters import ColumnRangeFilter column_family_id = object() row_filter1 = ColumnRangeFilter(column_family_id) @@ -422,7 +422,7 @@ def test_column_range_filter___eq__type_differ(): def test_column_range_filter___ne__(): - from google.cloud.bigtable.deprecated.row_filters import ColumnRangeFilter + from google.cloud.bigtable.row_filters import ColumnRangeFilter column_family_id = object() other_column_family_id = object() @@ -448,7 +448,7 @@ def test_column_range_filter___ne__(): def test_column_range_filter_to_pb(): - from google.cloud.bigtable.deprecated.row_filters import ColumnRangeFilter + from google.cloud.bigtable.row_filters import ColumnRangeFilter column_family_id = "column-family-id" row_filter = ColumnRangeFilter(column_family_id) @@ -458,7 +458,7 @@ def test_column_range_filter_to_pb(): def test_column_range_filter_to_pb_inclusive_start(): - from google.cloud.bigtable.deprecated.row_filters import ColumnRangeFilter + from google.cloud.bigtable.row_filters import ColumnRangeFilter column_family_id = "column-family-id" column = b"column" @@ -471,7 +471,7 @@ def test_column_range_filter_to_pb_inclusive_start(): def test_column_range_filter_to_pb_exclusive_start(): - from google.cloud.bigtable.deprecated.row_filters import ColumnRangeFilter + from google.cloud.bigtable.row_filters import ColumnRangeFilter column_family_id = "column-family-id" column = b"column" @@ -486,7 +486,7 @@ def test_column_range_filter_to_pb_exclusive_start(): def test_column_range_filter_to_pb_inclusive_end(): - from google.cloud.bigtable.deprecated.row_filters import ColumnRangeFilter + from google.cloud.bigtable.row_filters import ColumnRangeFilter column_family_id = "column-family-id" column = b"column" @@ -499,7 +499,7 @@ def test_column_range_filter_to_pb_inclusive_end(): def test_column_range_filter_to_pb_exclusive_end(): - from google.cloud.bigtable.deprecated.row_filters import ColumnRangeFilter + from google.cloud.bigtable.row_filters import ColumnRangeFilter column_family_id = "column-family-id" column = b"column" @@ -514,7 +514,7 @@ def test_column_range_filter_to_pb_exclusive_end(): def test_value_regex_filter_to_pb_w_bytes(): - from google.cloud.bigtable.deprecated.row_filters import ValueRegexFilter + from google.cloud.bigtable.row_filters import ValueRegexFilter value = regex = b"value-regex" row_filter = ValueRegexFilter(value) @@ -524,7 +524,7 @@ def test_value_regex_filter_to_pb_w_bytes(): def test_value_regex_filter_to_pb_w_str(): - from google.cloud.bigtable.deprecated.row_filters import ValueRegexFilter + from google.cloud.bigtable.row_filters import ValueRegexFilter value = "value-regex" regex = value.encode("ascii") @@ -535,7 +535,7 @@ def test_value_regex_filter_to_pb_w_str(): def test_exact_value_filter_to_pb_w_bytes(): - from google.cloud.bigtable.deprecated.row_filters import ExactValueFilter + from google.cloud.bigtable.row_filters import ExactValueFilter value = regex = b"value-regex" 
row_filter = ExactValueFilter(value) @@ -545,7 +545,7 @@ def test_exact_value_filter_to_pb_w_bytes(): def test_exact_value_filter_to_pb_w_str(): - from google.cloud.bigtable.deprecated.row_filters import ExactValueFilter + from google.cloud.bigtable.row_filters import ExactValueFilter value = "value-regex" regex = value.encode("ascii") @@ -557,7 +557,7 @@ def test_exact_value_filter_to_pb_w_str(): def test_exact_value_filter_to_pb_w_int(): import struct - from google.cloud.bigtable.deprecated.row_filters import ExactValueFilter + from google.cloud.bigtable.row_filters import ExactValueFilter value = 1 regex = struct.Struct(">q").pack(value) @@ -568,7 +568,7 @@ def test_exact_value_filter_to_pb_w_int(): def test_value_range_filter_constructor_defaults(): - from google.cloud.bigtable.deprecated.row_filters import ValueRangeFilter + from google.cloud.bigtable.row_filters import ValueRangeFilter row_filter = ValueRangeFilter() @@ -579,7 +579,7 @@ def test_value_range_filter_constructor_defaults(): def test_value_range_filter_constructor_explicit(): - from google.cloud.bigtable.deprecated.row_filters import ValueRangeFilter + from google.cloud.bigtable.row_filters import ValueRangeFilter start_value = object() end_value = object() @@ -600,7 +600,7 @@ def test_value_range_filter_constructor_explicit(): def test_value_range_filter_constructor_w_int_values(): - from google.cloud.bigtable.deprecated.row_filters import ValueRangeFilter + from google.cloud.bigtable.row_filters import ValueRangeFilter import struct start_value = 1 @@ -618,21 +618,21 @@ def test_value_range_filter_constructor_w_int_values(): def test_value_range_filter_constructor_bad_start(): - from google.cloud.bigtable.deprecated.row_filters import ValueRangeFilter + from google.cloud.bigtable.row_filters import ValueRangeFilter with pytest.raises(ValueError): ValueRangeFilter(inclusive_start=True) def test_value_range_filter_constructor_bad_end(): - from google.cloud.bigtable.deprecated.row_filters import ValueRangeFilter + from google.cloud.bigtable.row_filters import ValueRangeFilter with pytest.raises(ValueError): ValueRangeFilter(inclusive_end=True) def test_value_range_filter___eq__(): - from google.cloud.bigtable.deprecated.row_filters import ValueRangeFilter + from google.cloud.bigtable.row_filters import ValueRangeFilter start_value = object() end_value = object() @@ -654,7 +654,7 @@ def test_value_range_filter___eq__(): def test_value_range_filter___eq__type_differ(): - from google.cloud.bigtable.deprecated.row_filters import ValueRangeFilter + from google.cloud.bigtable.row_filters import ValueRangeFilter row_filter1 = ValueRangeFilter() row_filter2 = object() @@ -662,7 +662,7 @@ def test_value_range_filter___eq__type_differ(): def test_value_range_filter___ne__(): - from google.cloud.bigtable.deprecated.row_filters import ValueRangeFilter + from google.cloud.bigtable.row_filters import ValueRangeFilter start_value = object() other_start_value = object() @@ -685,7 +685,7 @@ def test_value_range_filter___ne__(): def test_value_range_filter_to_pb(): - from google.cloud.bigtable.deprecated.row_filters import ValueRangeFilter + from google.cloud.bigtable.row_filters import ValueRangeFilter row_filter = ValueRangeFilter() expected_pb = _RowFilterPB(value_range_filter=_ValueRangePB()) @@ -693,7 +693,7 @@ def test_value_range_filter_to_pb(): def test_value_range_filter_to_pb_inclusive_start(): - from google.cloud.bigtable.deprecated.row_filters import ValueRangeFilter + from google.cloud.bigtable.row_filters import 
ValueRangeFilter value = b"some-value" row_filter = ValueRangeFilter(start_value=value) @@ -703,7 +703,7 @@ def test_value_range_filter_to_pb_inclusive_start(): def test_value_range_filter_to_pb_exclusive_start(): - from google.cloud.bigtable.deprecated.row_filters import ValueRangeFilter + from google.cloud.bigtable.row_filters import ValueRangeFilter value = b"some-value" row_filter = ValueRangeFilter(start_value=value, inclusive_start=False) @@ -713,7 +713,7 @@ def test_value_range_filter_to_pb_exclusive_start(): def test_value_range_filter_to_pb_inclusive_end(): - from google.cloud.bigtable.deprecated.row_filters import ValueRangeFilter + from google.cloud.bigtable.row_filters import ValueRangeFilter value = b"some-value" row_filter = ValueRangeFilter(end_value=value) @@ -723,7 +723,7 @@ def test_value_range_filter_to_pb_inclusive_end(): def test_value_range_filter_to_pb_exclusive_end(): - from google.cloud.bigtable.deprecated.row_filters import ValueRangeFilter + from google.cloud.bigtable.row_filters import ValueRangeFilter value = b"some-value" row_filter = ValueRangeFilter(end_value=value, inclusive_end=False) @@ -733,7 +733,7 @@ def test_value_range_filter_to_pb_exclusive_end(): def test_cell_count_constructor(): - from google.cloud.bigtable.deprecated.row_filters import _CellCountFilter + from google.cloud.bigtable.row_filters import _CellCountFilter num_cells = object() row_filter = _CellCountFilter(num_cells) @@ -741,7 +741,7 @@ def test_cell_count_constructor(): def test_cell_count___eq__type_differ(): - from google.cloud.bigtable.deprecated.row_filters import _CellCountFilter + from google.cloud.bigtable.row_filters import _CellCountFilter num_cells = object() row_filter1 = _CellCountFilter(num_cells) @@ -750,7 +750,7 @@ def test_cell_count___eq__type_differ(): def test_cell_count___eq__same_value(): - from google.cloud.bigtable.deprecated.row_filters import _CellCountFilter + from google.cloud.bigtable.row_filters import _CellCountFilter num_cells = object() row_filter1 = _CellCountFilter(num_cells) @@ -759,7 +759,7 @@ def test_cell_count___eq__same_value(): def test_cell_count___ne__same_value(): - from google.cloud.bigtable.deprecated.row_filters import _CellCountFilter + from google.cloud.bigtable.row_filters import _CellCountFilter num_cells = object() row_filter1 = _CellCountFilter(num_cells) @@ -768,7 +768,7 @@ def test_cell_count___ne__same_value(): def test_cells_row_offset_filter_to_pb(): - from google.cloud.bigtable.deprecated.row_filters import CellsRowOffsetFilter + from google.cloud.bigtable.row_filters import CellsRowOffsetFilter num_cells = 76 row_filter = CellsRowOffsetFilter(num_cells) @@ -778,7 +778,7 @@ def test_cells_row_offset_filter_to_pb(): def test_cells_row_limit_filter_to_pb(): - from google.cloud.bigtable.deprecated.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.row_filters import CellsRowLimitFilter num_cells = 189 row_filter = CellsRowLimitFilter(num_cells) @@ -788,7 +788,7 @@ def test_cells_row_limit_filter_to_pb(): def test_cells_column_limit_filter_to_pb(): - from google.cloud.bigtable.deprecated.row_filters import CellsColumnLimitFilter + from google.cloud.bigtable.row_filters import CellsColumnLimitFilter num_cells = 10 row_filter = CellsColumnLimitFilter(num_cells) @@ -798,7 +798,7 @@ def test_cells_column_limit_filter_to_pb(): def test_strip_value_transformer_filter_to_pb(): - from google.cloud.bigtable.deprecated.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.row_filters import 
StripValueTransformerFilter flag = True row_filter = StripValueTransformerFilter(flag) @@ -808,7 +808,7 @@ def test_strip_value_transformer_filter_to_pb(): def test_apply_label_filter_constructor(): - from google.cloud.bigtable.deprecated.row_filters import ApplyLabelFilter + from google.cloud.bigtable.row_filters import ApplyLabelFilter label = object() row_filter = ApplyLabelFilter(label) @@ -816,7 +816,7 @@ def test_apply_label_filter_constructor(): def test_apply_label_filter___eq__type_differ(): - from google.cloud.bigtable.deprecated.row_filters import ApplyLabelFilter + from google.cloud.bigtable.row_filters import ApplyLabelFilter label = object() row_filter1 = ApplyLabelFilter(label) @@ -825,7 +825,7 @@ def test_apply_label_filter___eq__type_differ(): def test_apply_label_filter___eq__same_value(): - from google.cloud.bigtable.deprecated.row_filters import ApplyLabelFilter + from google.cloud.bigtable.row_filters import ApplyLabelFilter label = object() row_filter1 = ApplyLabelFilter(label) @@ -834,7 +834,7 @@ def test_apply_label_filter___eq__same_value(): def test_apply_label_filter___ne__(): - from google.cloud.bigtable.deprecated.row_filters import ApplyLabelFilter + from google.cloud.bigtable.row_filters import ApplyLabelFilter label = object() other_label = object() @@ -844,7 +844,7 @@ def test_apply_label_filter___ne__(): def test_apply_label_filter_to_pb(): - from google.cloud.bigtable.deprecated.row_filters import ApplyLabelFilter + from google.cloud.bigtable.row_filters import ApplyLabelFilter label = "label" row_filter = ApplyLabelFilter(label) @@ -854,14 +854,14 @@ def test_apply_label_filter_to_pb(): def test_filter_combination_constructor_defaults(): - from google.cloud.bigtable.deprecated.row_filters import _FilterCombination + from google.cloud.bigtable.row_filters import _FilterCombination row_filter = _FilterCombination() assert row_filter.filters == [] def test_filter_combination_constructor_explicit(): - from google.cloud.bigtable.deprecated.row_filters import _FilterCombination + from google.cloud.bigtable.row_filters import _FilterCombination filters = object() row_filter = _FilterCombination(filters=filters) @@ -869,7 +869,7 @@ def test_filter_combination_constructor_explicit(): def test_filter_combination___eq__(): - from google.cloud.bigtable.deprecated.row_filters import _FilterCombination + from google.cloud.bigtable.row_filters import _FilterCombination filters = object() row_filter1 = _FilterCombination(filters=filters) @@ -878,7 +878,7 @@ def test_filter_combination___eq__(): def test_filter_combination___eq__type_differ(): - from google.cloud.bigtable.deprecated.row_filters import _FilterCombination + from google.cloud.bigtable.row_filters import _FilterCombination filters = object() row_filter1 = _FilterCombination(filters=filters) @@ -887,7 +887,7 @@ def test_filter_combination___eq__type_differ(): def test_filter_combination___ne__(): - from google.cloud.bigtable.deprecated.row_filters import _FilterCombination + from google.cloud.bigtable.row_filters import _FilterCombination filters = object() other_filters = object() @@ -897,9 +897,9 @@ def test_filter_combination___ne__(): def test_row_filter_chain_to_pb(): - from google.cloud.bigtable.deprecated.row_filters import RowFilterChain - from google.cloud.bigtable.deprecated.row_filters import RowSampleFilter - from google.cloud.bigtable.deprecated.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.row_filters import RowFilterChain + from google.cloud.bigtable.row_filters 
import RowSampleFilter + from google.cloud.bigtable.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter1_pb = row_filter1.to_pb() @@ -917,10 +917,10 @@ def test_row_filter_chain_to_pb(): def test_row_filter_chain_to_pb_nested(): - from google.cloud.bigtable.deprecated.row_filters import CellsRowLimitFilter - from google.cloud.bigtable.deprecated.row_filters import RowFilterChain - from google.cloud.bigtable.deprecated.row_filters import RowSampleFilter - from google.cloud.bigtable.deprecated.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.row_filters import RowFilterChain + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter2 = RowSampleFilter(0.25) @@ -941,9 +941,9 @@ def test_row_filter_chain_to_pb_nested(): def test_row_filter_union_to_pb(): - from google.cloud.bigtable.deprecated.row_filters import RowFilterUnion - from google.cloud.bigtable.deprecated.row_filters import RowSampleFilter - from google.cloud.bigtable.deprecated.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.row_filters import RowFilterUnion + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter1_pb = row_filter1.to_pb() @@ -961,10 +961,10 @@ def test_row_filter_union_to_pb(): def test_row_filter_union_to_pb_nested(): - from google.cloud.bigtable.deprecated.row_filters import CellsRowLimitFilter - from google.cloud.bigtable.deprecated.row_filters import RowFilterUnion - from google.cloud.bigtable.deprecated.row_filters import RowSampleFilter - from google.cloud.bigtable.deprecated.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.row_filters import RowFilterUnion + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter2 = RowSampleFilter(0.25) @@ -985,7 +985,7 @@ def test_row_filter_union_to_pb_nested(): def test_conditional_row_filter_constructor(): - from google.cloud.bigtable.deprecated.row_filters import ConditionalRowFilter + from google.cloud.bigtable.row_filters import ConditionalRowFilter base_filter = object() true_filter = object() @@ -999,7 +999,7 @@ def test_conditional_row_filter_constructor(): def test_conditional_row_filter___eq__(): - from google.cloud.bigtable.deprecated.row_filters import ConditionalRowFilter + from google.cloud.bigtable.row_filters import ConditionalRowFilter base_filter = object() true_filter = object() @@ -1014,7 +1014,7 @@ def test_conditional_row_filter___eq__(): def test_conditional_row_filter___eq__type_differ(): - from google.cloud.bigtable.deprecated.row_filters import ConditionalRowFilter + from google.cloud.bigtable.row_filters import ConditionalRowFilter base_filter = object() true_filter = object() @@ -1027,7 +1027,7 @@ def test_conditional_row_filter___eq__type_differ(): def test_conditional_row_filter___ne__(): - from google.cloud.bigtable.deprecated.row_filters import ConditionalRowFilter + from google.cloud.bigtable.row_filters import ConditionalRowFilter base_filter = object() 
other_base_filter = object() @@ -1043,10 +1043,10 @@ def test_conditional_row_filter___ne__(): def test_conditional_row_filter_to_pb(): - from google.cloud.bigtable.deprecated.row_filters import ConditionalRowFilter - from google.cloud.bigtable.deprecated.row_filters import CellsRowOffsetFilter - from google.cloud.bigtable.deprecated.row_filters import RowSampleFilter - from google.cloud.bigtable.deprecated.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.row_filters import ConditionalRowFilter + from google.cloud.bigtable.row_filters import CellsRowOffsetFilter + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter1_pb = row_filter1.to_pb() @@ -1073,9 +1073,9 @@ def test_conditional_row_filter_to_pb(): def test_conditional_row_filter_to_pb_true_only(): - from google.cloud.bigtable.deprecated.row_filters import ConditionalRowFilter - from google.cloud.bigtable.deprecated.row_filters import RowSampleFilter - from google.cloud.bigtable.deprecated.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.row_filters import ConditionalRowFilter + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter1_pb = row_filter1.to_pb() @@ -1095,9 +1095,9 @@ def test_conditional_row_filter_to_pb_true_only(): def test_conditional_row_filter_to_pb_false_only(): - from google.cloud.bigtable.deprecated.row_filters import ConditionalRowFilter - from google.cloud.bigtable.deprecated.row_filters import RowSampleFilter - from google.cloud.bigtable.deprecated.row_filters import StripValueTransformerFilter + from google.cloud.bigtable.row_filters import ConditionalRowFilter + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter1_pb = row_filter1.to_pb() diff --git a/tests/unit/v2_client/test_row_merger.py b/tests/unit/v2_client/test_row_merger.py index 26cedb34d..483c04536 100644 --- a/tests/unit/v2_client/test_row_merger.py +++ b/tests/unit/v2_client/test_row_merger.py @@ -5,13 +5,9 @@ import proto import pytest -from google.cloud.bigtable.deprecated.row_data import ( - PartialRowsData, - PartialRowData, - InvalidChunk, -) +from google.cloud.bigtable.row_data import PartialRowsData, PartialRowData, InvalidChunk from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse -from google.cloud.bigtable.deprecated.row_merger import _RowMerger +from google.cloud.bigtable.row_merger import _RowMerger # TODO: autogenerate protos from diff --git a/tests/unit/v2_client/test_row_set.py b/tests/unit/v2_client/test_row_set.py index ce0e9bfea..1a33be720 100644 --- a/tests/unit/v2_client/test_row_set.py +++ b/tests/unit/v2_client/test_row_set.py @@ -14,7 +14,7 @@ def test_row_set_constructor(): - from google.cloud.bigtable.deprecated.row_set import RowSet + from google.cloud.bigtable.row_set import RowSet row_set = RowSet() assert [] == row_set.row_keys @@ -22,8 +22,8 @@ def test_row_set_constructor(): def test_row_set__eq__(): - from google.cloud.bigtable.deprecated.row_set import RowRange - from google.cloud.bigtable.deprecated.row_set import RowSet + from google.cloud.bigtable.row_set import RowRange + from google.cloud.bigtable.row_set import RowSet 
row_key1 = b"row_key1" row_key2 = b"row_key1" @@ -42,7 +42,7 @@ def test_row_set__eq__(): def test_row_set__eq__type_differ(): - from google.cloud.bigtable.deprecated.row_set import RowSet + from google.cloud.bigtable.row_set import RowSet row_set1 = RowSet() row_set2 = object() @@ -50,7 +50,7 @@ def test_row_set__eq__type_differ(): def test_row_set__eq__len_row_keys_differ(): - from google.cloud.bigtable.deprecated.row_set import RowSet + from google.cloud.bigtable.row_set import RowSet row_key1 = b"row_key1" row_key2 = b"row_key1" @@ -66,8 +66,8 @@ def test_row_set__eq__len_row_keys_differ(): def test_row_set__eq__len_row_ranges_differ(): - from google.cloud.bigtable.deprecated.row_set import RowRange - from google.cloud.bigtable.deprecated.row_set import RowSet + from google.cloud.bigtable.row_set import RowRange + from google.cloud.bigtable.row_set import RowSet row_range1 = RowRange(b"row_key4", b"row_key9") row_range2 = RowRange(b"row_key4", b"row_key9") @@ -83,7 +83,7 @@ def test_row_set__eq__len_row_ranges_differ(): def test_row_set__eq__row_keys_differ(): - from google.cloud.bigtable.deprecated.row_set import RowSet + from google.cloud.bigtable.row_set import RowSet row_set1 = RowSet() row_set2 = RowSet() @@ -99,8 +99,8 @@ def test_row_set__eq__row_keys_differ(): def test_row_set__eq__row_ranges_differ(): - from google.cloud.bigtable.deprecated.row_set import RowRange - from google.cloud.bigtable.deprecated.row_set import RowSet + from google.cloud.bigtable.row_set import RowRange + from google.cloud.bigtable.row_set import RowSet row_range1 = RowRange(b"row_key4", b"row_key9") row_range2 = RowRange(b"row_key14", b"row_key19") @@ -119,8 +119,8 @@ def test_row_set__eq__row_ranges_differ(): def test_row_set__ne__(): - from google.cloud.bigtable.deprecated.row_set import RowRange - from google.cloud.bigtable.deprecated.row_set import RowSet + from google.cloud.bigtable.row_set import RowRange + from google.cloud.bigtable.row_set import RowSet row_key1 = b"row_key1" row_key2 = b"row_key1" @@ -139,8 +139,8 @@ def test_row_set__ne__(): def test_row_set__ne__same_value(): - from google.cloud.bigtable.deprecated.row_set import RowRange - from google.cloud.bigtable.deprecated.row_set import RowSet + from google.cloud.bigtable.row_set import RowRange + from google.cloud.bigtable.row_set import RowSet row_key1 = b"row_key1" row_key2 = b"row_key1" @@ -159,7 +159,7 @@ def test_row_set__ne__same_value(): def test_row_set_add_row_key(): - from google.cloud.bigtable.deprecated.row_set import RowSet + from google.cloud.bigtable.row_set import RowSet row_set = RowSet() row_set.add_row_key("row_key1") @@ -168,8 +168,8 @@ def test_row_set_add_row_key(): def test_row_set_add_row_range(): - from google.cloud.bigtable.deprecated.row_set import RowRange - from google.cloud.bigtable.deprecated.row_set import RowSet + from google.cloud.bigtable.row_set import RowRange + from google.cloud.bigtable.row_set import RowSet row_set = RowSet() row_range1 = RowRange(b"row_key1", b"row_key9") @@ -181,7 +181,7 @@ def test_row_set_add_row_range(): def test_row_set_add_row_range_from_keys(): - from google.cloud.bigtable.deprecated.row_set import RowSet + from google.cloud.bigtable.row_set import RowSet row_set = RowSet() row_set.add_row_range_from_keys( @@ -194,7 +194,7 @@ def test_row_set_add_row_range_from_keys(): def test_row_set_add_row_range_with_prefix(): - from google.cloud.bigtable.deprecated.row_set import RowSet + from google.cloud.bigtable.row_set import RowSet row_set = RowSet() 
row_set.add_row_range_with_prefix("row") @@ -203,8 +203,8 @@ def test_row_set_add_row_range_with_prefix(): def test_row_set__update_message_request(): from google.cloud._helpers import _to_bytes - from google.cloud.bigtable.deprecated.row_set import RowRange - from google.cloud.bigtable.deprecated.row_set import RowSet + from google.cloud.bigtable.row_set import RowRange + from google.cloud.bigtable.row_set import RowSet row_set = RowSet() table_name = "table_name" @@ -224,7 +224,7 @@ def test_row_set__update_message_request(): def test_row_range_constructor(): - from google.cloud.bigtable.deprecated.row_set import RowRange + from google.cloud.bigtable.row_set import RowRange start_key = "row_key1" end_key = "row_key9" @@ -236,7 +236,7 @@ def test_row_range_constructor(): def test_row_range___hash__set_equality(): - from google.cloud.bigtable.deprecated.row_set import RowRange + from google.cloud.bigtable.row_set import RowRange row_range1 = RowRange("row_key1", "row_key9") row_range2 = RowRange("row_key1", "row_key9") @@ -246,7 +246,7 @@ def test_row_range___hash__set_equality(): def test_row_range___hash__not_equals(): - from google.cloud.bigtable.deprecated.row_set import RowRange + from google.cloud.bigtable.row_set import RowRange row_range1 = RowRange("row_key1", "row_key9") row_range2 = RowRange("row_key1", "row_key19") @@ -256,7 +256,7 @@ def test_row_range___hash__not_equals(): def test_row_range__eq__(): - from google.cloud.bigtable.deprecated.row_set import RowRange + from google.cloud.bigtable.row_set import RowRange start_key = b"row_key1" end_key = b"row_key9" @@ -266,7 +266,7 @@ def test_row_range__eq__(): def test_row_range___eq__type_differ(): - from google.cloud.bigtable.deprecated.row_set import RowRange + from google.cloud.bigtable.row_set import RowRange start_key = b"row_key1" end_key = b"row_key9" @@ -276,7 +276,7 @@ def test_row_range___eq__type_differ(): def test_row_range__ne__(): - from google.cloud.bigtable.deprecated.row_set import RowRange + from google.cloud.bigtable.row_set import RowRange start_key = b"row_key1" end_key = b"row_key9" @@ -286,7 +286,7 @@ def test_row_range__ne__(): def test_row_range__ne__same_value(): - from google.cloud.bigtable.deprecated.row_set import RowRange + from google.cloud.bigtable.row_set import RowRange start_key = b"row_key1" end_key = b"row_key9" @@ -296,7 +296,7 @@ def test_row_range__ne__same_value(): def test_row_range_get_range_kwargs_closed_open(): - from google.cloud.bigtable.deprecated.row_set import RowRange + from google.cloud.bigtable.row_set import RowRange start_key = b"row_key1" end_key = b"row_key9" @@ -307,7 +307,7 @@ def test_row_range_get_range_kwargs_closed_open(): def test_row_range_get_range_kwargs_open_closed(): - from google.cloud.bigtable.deprecated.row_set import RowRange + from google.cloud.bigtable.row_set import RowRange start_key = b"row_key1" end_key = b"row_key9" diff --git a/tests/unit/v2_client/test_table.py b/tests/unit/v2_client/test_table.py index ad31e8bc9..3d7d2e8ee 100644 --- a/tests/unit/v2_client/test_table.py +++ b/tests/unit/v2_client/test_table.py @@ -50,11 +50,11 @@ STATUS_INTERNAL = StatusCode.INTERNAL.value[0] -@mock.patch("google.cloud.bigtable.deprecated.table._MAX_BULK_MUTATIONS", new=3) +@mock.patch("google.cloud.bigtable.table._MAX_BULK_MUTATIONS", new=3) def test__compile_mutation_entries_w_too_many_mutations(): - from google.cloud.bigtable.deprecated.row import DirectRow - from google.cloud.bigtable.deprecated.table import TooManyMutationsError - from 
google.cloud.bigtable.deprecated.table import _compile_mutation_entries + from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable.table import TooManyMutationsError + from google.cloud.bigtable.table import _compile_mutation_entries table = mock.Mock(name="table", spec=["name"]) table.name = "table" @@ -72,8 +72,8 @@ def test__compile_mutation_entries_w_too_many_mutations(): def test__compile_mutation_entries_normal(): - from google.cloud.bigtable.deprecated.row import DirectRow - from google.cloud.bigtable.deprecated.table import _compile_mutation_entries + from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable.table import _compile_mutation_entries from google.cloud.bigtable_v2.types import MutateRowsRequest from google.cloud.bigtable_v2.types import data @@ -109,9 +109,9 @@ def test__compile_mutation_entries_normal(): def test__check_row_table_name_w_wrong_table_name(): - from google.cloud.bigtable.deprecated.table import _check_row_table_name - from google.cloud.bigtable.deprecated.table import TableMismatchError - from google.cloud.bigtable.deprecated.row import DirectRow + from google.cloud.bigtable.table import _check_row_table_name + from google.cloud.bigtable.table import TableMismatchError + from google.cloud.bigtable.row import DirectRow table = mock.Mock(name="table", spec=["name"]) table.name = "table" @@ -122,8 +122,8 @@ def test__check_row_table_name_w_wrong_table_name(): def test__check_row_table_name_w_right_table_name(): - from google.cloud.bigtable.deprecated.row import DirectRow - from google.cloud.bigtable.deprecated.table import _check_row_table_name + from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable.table import _check_row_table_name table = mock.Mock(name="table", spec=["name"]) table.name = "table" @@ -133,8 +133,8 @@ def test__check_row_table_name_w_right_table_name(): def test__check_row_type_w_wrong_row_type(): - from google.cloud.bigtable.deprecated.row import ConditionalRow - from google.cloud.bigtable.deprecated.table import _check_row_type + from google.cloud.bigtable.row import ConditionalRow + from google.cloud.bigtable.table import _check_row_type row = ConditionalRow(row_key=b"row_key", table="table", filter_=None) with pytest.raises(TypeError): @@ -142,21 +142,21 @@ def test__check_row_type_w_wrong_row_type(): def test__check_row_type_w_right_row_type(): - from google.cloud.bigtable.deprecated.row import DirectRow - from google.cloud.bigtable.deprecated.table import _check_row_type + from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable.table import _check_row_type row = DirectRow(row_key=b"row_key", table="table") assert not _check_row_type(row) def _make_client(*args, **kwargs): - from google.cloud.bigtable.deprecated.client import Client + from google.cloud.bigtable.client import Client return Client(*args, **kwargs) def _make_table(*args, **kwargs): - from google.cloud.bigtable.deprecated.table import Table + from google.cloud.bigtable.table import Table return Table(*args, **kwargs) @@ -219,7 +219,7 @@ def _table_row_methods_helper(): def test_table_row_factory_direct(): - from google.cloud.bigtable.deprecated.row import DirectRow + from google.cloud.bigtable.row import DirectRow table, row_key = _table_row_methods_helper() with warnings.catch_warnings(record=True) as warned: @@ -234,7 +234,7 @@ def test_table_row_factory_direct(): def test_table_row_factory_conditional(): - from google.cloud.bigtable.deprecated.row import ConditionalRow + from 
google.cloud.bigtable.row import ConditionalRow table, row_key = _table_row_methods_helper() filter_ = object() @@ -251,7 +251,7 @@ def test_table_row_factory_conditional(): def test_table_row_factory_append(): - from google.cloud.bigtable.deprecated.row import AppendRow + from google.cloud.bigtable.row import AppendRow table, row_key = _table_row_methods_helper() @@ -278,7 +278,7 @@ def test_table_row_factory_failure(): def test_table_direct_row(): - from google.cloud.bigtable.deprecated.row import DirectRow + from google.cloud.bigtable.row import DirectRow table, row_key = _table_row_methods_helper() row = table.direct_row(row_key) @@ -289,7 +289,7 @@ def test_table_direct_row(): def test_table_conditional_row(): - from google.cloud.bigtable.deprecated.row import ConditionalRow + from google.cloud.bigtable.row import ConditionalRow table, row_key = _table_row_methods_helper() filter_ = object() @@ -301,7 +301,7 @@ def test_table_conditional_row(): def test_table_append_row(): - from google.cloud.bigtable.deprecated.row import AppendRow + from google.cloud.bigtable.row import AppendRow table, row_key = _table_row_methods_helper() row = table.append_row(row_key) @@ -357,7 +357,7 @@ def _create_table_helper(split_keys=[], column_families={}): from google.cloud.bigtable_admin_v2.types import ( bigtable_table_admin as table_admin_messages_v2_pb2, ) - from google.cloud.bigtable.deprecated.column_family import ColumnFamily + from google.cloud.bigtable.column_family import ColumnFamily credentials = _make_credentials() client = _make_client(project="project-id", credentials=credentials, admin=True) @@ -391,7 +391,7 @@ def test_table_create(): def test_table_create_with_families(): - from google.cloud.bigtable.deprecated.column_family import MaxVersionsGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule families = {"family": MaxVersionsGCRule(5)} _create_table_helper(column_families=families) @@ -404,7 +404,7 @@ def test_table_create_with_split_keys(): def test_table_exists_hit(): from google.cloud.bigtable_admin_v2.types import ListTablesResponse from google.cloud.bigtable_admin_v2.types import Table - from google.cloud.bigtable.deprecated import enums + from google.cloud.bigtable import enums credentials = _make_credentials() client = _make_client(project="project-id", credentials=credentials, admin=True) @@ -426,7 +426,7 @@ def test_table_exists_hit(): def test_table_exists_miss(): from google.api_core.exceptions import NotFound - from google.cloud.bigtable.deprecated import enums + from google.cloud.bigtable import enums credentials = _make_credentials() client = _make_client(project="project-id", credentials=credentials, admin=True) @@ -447,7 +447,7 @@ def test_table_exists_miss(): def test_table_exists_error(): from google.api_core.exceptions import BadRequest - from google.cloud.bigtable.deprecated import enums + from google.cloud.bigtable import enums credentials = _make_credentials() client = _make_client(project="project-id", credentials=credentials, admin=True) @@ -512,8 +512,8 @@ def test_table_list_column_families(): def test_table_get_cluster_states(): - from google.cloud.bigtable.deprecated.enums import Table as enum_table - from google.cloud.bigtable.deprecated.table import ClusterState + from google.cloud.bigtable.enums import Table as enum_table + from google.cloud.bigtable.table import ClusterState INITIALIZING = enum_table.ReplicationState.INITIALIZING PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE @@ -557,10 +557,10 @@ def 
test_table_get_cluster_states(): def test_table_get_encryption_info(): from google.rpc.code_pb2 import Code - from google.cloud.bigtable.deprecated.encryption_info import EncryptionInfo - from google.cloud.bigtable.deprecated.enums import EncryptionInfo as enum_crypto - from google.cloud.bigtable.deprecated.enums import Table as enum_table - from google.cloud.bigtable.deprecated.error import Status + from google.cloud.bigtable.encryption_info import EncryptionInfo + from google.cloud.bigtable.enums import EncryptionInfo as enum_crypto + from google.cloud.bigtable.enums import Table as enum_table + from google.cloud.bigtable.error import Status ENCRYPTION_TYPE_UNSPECIFIED = enum_crypto.EncryptionType.ENCRYPTION_TYPE_UNSPECIFIED GOOGLE_DEFAULT_ENCRYPTION = enum_crypto.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION @@ -640,9 +640,10 @@ def _make_data_api(): def _table_read_row_helper(chunks, expected_result, app_profile_id=None): from google.cloud._testing import _Monkey - from google.cloud.bigtable.deprecated import table as MUT - from google.cloud.bigtable.deprecated.row_set import RowSet - from google.cloud.bigtable.deprecated.row_filters import RowSampleFilter + from google.cloud.bigtable import table as MUT + from google.cloud.bigtable.row_set import RowSet + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS credentials = _make_credentials() client = _make_client(project="project-id", credentials=credentials, admin=True) @@ -691,7 +692,9 @@ def mock_create_row_request(table_name, **kwargs): assert result == expected_result assert mock_created == expected_request - data_api.read_rows.assert_called_once_with(request_pb, timeout=61.0) + data_api.read_rows.assert_called_once_with( + request_pb, timeout=61.0, retry=DEFAULT_RETRY_READ_ROWS + ) def test_table_read_row_miss_no__responses(): @@ -704,8 +707,8 @@ def test_table_read_row_miss_no_chunks_in_response(): def test_table_read_row_complete(): - from google.cloud.bigtable.deprecated.row_data import Cell - from google.cloud.bigtable.deprecated.row_data import PartialRowData + from google.cloud.bigtable.row_data import Cell + from google.cloud.bigtable.row_data import PartialRowData app_profile_id = "app-profile-id" chunk = _ReadRowsResponseCellChunkPB( @@ -768,7 +771,7 @@ def _table_mutate_rows_helper( mutation_timeout=None, app_profile_id=None, retry=None, timeout=None ): from google.rpc.status_pb2 import Status - from google.cloud.bigtable.deprecated.table import DEFAULT_RETRY + from google.cloud.bigtable.table import DEFAULT_RETRY credentials = _make_credentials() client = _make_client(project="project-id", credentials=credentials, admin=True) @@ -787,7 +790,7 @@ def _table_mutate_rows_helper( response = [Status(code=0), Status(code=1)] instance_mock = mock.Mock(return_value=response) klass_mock = mock.patch( - "google.cloud.bigtable.deprecated.table._RetryableMutateRowsWorker", + "google.cloud.bigtable.table._RetryableMutateRowsWorker", new=mock.MagicMock(return_value=instance_mock), ) @@ -854,9 +857,9 @@ def test_table_mutate_rows_w_mutation_timeout_and_timeout_arg(): def test_table_read_rows(): from google.cloud._testing import _Monkey - from google.cloud.bigtable.deprecated.row_data import PartialRowsData - from google.cloud.bigtable.deprecated import table as MUT - from google.cloud.bigtable.deprecated.row_data import DEFAULT_RETRY_READ_ROWS + from google.cloud.bigtable.row_data import PartialRowsData + from google.cloud.bigtable import table as MUT + from 
google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS credentials = _make_credentials() client = _make_client(project="project-id", credentials=credentials, admin=True) @@ -906,7 +909,7 @@ def mock_create_row_request(table_name, **kwargs): } assert mock_created == [(table.name, created_kwargs)] - data_api.read_rows.assert_called_once_with(request_pb, timeout=61.0) + data_api.read_rows.assert_called_once_with(request_pb, timeout=61.0, retry=retry) def test_table_read_retry_rows(): @@ -1017,7 +1020,7 @@ def test_table_read_retry_rows_no_full_table_scan(): def test_table_yield_retry_rows(): - from google.cloud.bigtable.deprecated.table import _create_row_request + from google.cloud.bigtable.table import _create_row_request credentials = _make_credentials() client = _make_client(project="project-id", credentials=credentials, admin=True) @@ -1079,9 +1082,10 @@ def test_table_yield_retry_rows(): def test_table_yield_rows_with_row_set(): - from google.cloud.bigtable.deprecated.row_set import RowSet - from google.cloud.bigtable.deprecated.row_set import RowRange - from google.cloud.bigtable.deprecated.table import _create_row_request + from google.cloud.bigtable.row_set import RowSet + from google.cloud.bigtable.row_set import RowRange + from google.cloud.bigtable.table import _create_row_request + from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS credentials = _make_credentials() client = _make_client(project="project-id", credentials=credentials, admin=True) @@ -1149,7 +1153,9 @@ def test_table_yield_rows_with_row_set(): end_key=ROW_KEY_2, ) expected_request.rows.row_keys.append(ROW_KEY_3) - data_api.read_rows.assert_called_once_with(expected_request, timeout=61.0) + data_api.read_rows.assert_called_once_with( + expected_request, timeout=61.0, retry=DEFAULT_RETRY_READ_ROWS + ) def test_table_sample_row_keys(): @@ -1174,9 +1180,7 @@ def test_table_truncate(): table = _make_table(TABLE_ID, instance) table_api = client._table_admin_client = _make_table_api() - with mock.patch( - "google.cloud.bigtable.deprecated.table.Table.name", new=TABLE_NAME - ): + with mock.patch("google.cloud.bigtable.table.Table.name", new=TABLE_NAME): result = table.truncate() assert result is None @@ -1257,7 +1261,7 @@ def test_table_mutations_batcher_factory(): def test_table_get_iam_policy(): from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.deprecated.policy import BIGTABLE_ADMIN_ROLE + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE credentials = _make_credentials() client = _make_client(project="project-id", credentials=credentials, admin=True) @@ -1288,8 +1292,8 @@ def test_table_get_iam_policy(): def test_table_set_iam_policy(): from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.deprecated.policy import Policy - from google.cloud.bigtable.deprecated.policy import BIGTABLE_ADMIN_ROLE + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE credentials = _make_credentials() client = _make_client(project="project-id", credentials=credentials, admin=True) @@ -1351,7 +1355,7 @@ def test_table_test_iam_permissions(): def test_table_backup_factory_defaults(): - from google.cloud.bigtable.deprecated.backup import Backup + from google.cloud.bigtable.backup import Backup instance = _make_table(INSTANCE_ID, None) table = _make_table(TABLE_ID, instance) @@ -1375,8 +1379,8 @@ def test_table_backup_factory_defaults(): def test_table_backup_factory_non_defaults(): import datetime from google.cloud._helpers 
import UTC - from google.cloud.bigtable.deprecated.backup import Backup - from google.cloud.bigtable.deprecated.instance import Instance + from google.cloud.bigtable.backup import Backup + from google.cloud.bigtable.instance import Instance instance = Instance(INSTANCE_ID, None) table = _make_table(TABLE_ID, instance) @@ -1406,7 +1410,7 @@ def _table_list_backups_helper(cluster_id=None, filter_=None, **kwargs): Backup as backup_pb, bigtable_table_admin, ) - from google.cloud.bigtable.deprecated.backup import Backup + from google.cloud.bigtable.backup import Backup client = _make_client( project=PROJECT_ID, credentials=_make_credentials(), admin=True @@ -1468,7 +1472,7 @@ def test_table_list_backups_w_options(): def _table_restore_helper(backup_name=None): - from google.cloud.bigtable.deprecated.instance import Instance + from google.cloud.bigtable.instance import Instance op_future = object() credentials = _make_credentials() @@ -1504,7 +1508,7 @@ def test_table_restore_table_w_backup_name(): def _make_worker(*args, **kwargs): - from google.cloud.bigtable.deprecated.table import _RetryableMutateRowsWorker + from google.cloud.bigtable.table import _RetryableMutateRowsWorker return _RetryableMutateRowsWorker(*args, **kwargs) @@ -1545,7 +1549,7 @@ def test_rmrw_callable_empty_rows(): def test_rmrw_callable_no_retry_strategy(): - from google.cloud.bigtable.deprecated.row import DirectRow + from google.cloud.bigtable.row import DirectRow # Setup: # - Mutate 3 rows. @@ -1587,8 +1591,8 @@ def test_rmrw_callable_no_retry_strategy(): def test_rmrw_callable_retry(): - from google.cloud.bigtable.deprecated.row import DirectRow - from google.cloud.bigtable.deprecated.table import DEFAULT_RETRY + from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable.table import DEFAULT_RETRY # Setup: # - Mutate 3 rows. 
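A minimal sketch (not part of the patch) of the read_rows expectation that the updated assertions earlier in this file's diff encode, assuming a mock data client like the one these tests build: the built request is now expected to be forwarded with an explicit retry (DEFAULT_RETRY_READ_ROWS) in addition to the 61.0s timeout.

    # sketch only; data_api and request_pb stand in for the test fixtures above
    from unittest import mock

    from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS

    data_api = mock.Mock(spec=["read_rows"])
    request_pb = object()  # placeholder for the ReadRowsRequest the table builds

    # call shape the client library is expected to produce after this change
    data_api.read_rows(request_pb, timeout=61.0, retry=DEFAULT_RETRY_READ_ROWS)

    # assertion style used by the updated tests
    data_api.read_rows.assert_called_once_with(
        request_pb, timeout=61.0, retry=DEFAULT_RETRY_READ_ROWS
    )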
@@ -1642,8 +1646,8 @@ def _do_mutate_retryable_rows_helper( mutate_rows_side_effect=None, ): from google.api_core.exceptions import ServiceUnavailable - from google.cloud.bigtable.deprecated.row import DirectRow - from google.cloud.bigtable.deprecated.table import _BigtableRetryableError + from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable.table import _BigtableRetryableError from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2 # Setup: @@ -1799,9 +1803,7 @@ def test_rmrw_do_mutate_retryable_rows_w_retryable_error_internal_rst_stream_err # Raise internal server error with RST STREAM error messages # There should be no error raised and that the request is retried from google.api_core.exceptions import InternalServerError - from google.cloud.bigtable.deprecated.row_data import ( - RETRYABLE_INTERNAL_ERROR_MESSAGES, - ) + from google.cloud.bigtable.row_data import RETRYABLE_INTERNAL_ERROR_MESSAGES row_cells = [ (b"row_key_1", ("cf", b"col", b"value1")), @@ -2007,7 +2009,7 @@ def test_rmrw_do_mutate_retryable_rows_mismatch_num_responses(): def test__create_row_request_table_name_only(): - from google.cloud.bigtable.deprecated.table import _create_row_request + from google.cloud.bigtable.table import _create_row_request table_name = "table_name" result = _create_row_request(table_name) @@ -2016,14 +2018,14 @@ def test__create_row_request_table_name_only(): def test__create_row_request_row_range_row_set_conflict(): - from google.cloud.bigtable.deprecated.table import _create_row_request + from google.cloud.bigtable.table import _create_row_request with pytest.raises(ValueError): _create_row_request(None, end_key=object(), row_set=object()) def test__create_row_request_row_range_start_key(): - from google.cloud.bigtable.deprecated.table import _create_row_request + from google.cloud.bigtable.table import _create_row_request from google.cloud.bigtable_v2.types import RowRange table_name = "table_name" @@ -2036,7 +2038,7 @@ def test__create_row_request_row_range_start_key(): def test__create_row_request_row_range_end_key(): - from google.cloud.bigtable.deprecated.table import _create_row_request + from google.cloud.bigtable.table import _create_row_request from google.cloud.bigtable_v2.types import RowRange table_name = "table_name" @@ -2049,7 +2051,7 @@ def test__create_row_request_row_range_end_key(): def test__create_row_request_row_range_both_keys(): - from google.cloud.bigtable.deprecated.table import _create_row_request + from google.cloud.bigtable.table import _create_row_request from google.cloud.bigtable_v2.types import RowRange table_name = "table_name" @@ -2063,7 +2065,7 @@ def test__create_row_request_row_range_both_keys(): def test__create_row_request_row_range_both_keys_inclusive(): - from google.cloud.bigtable.deprecated.table import _create_row_request + from google.cloud.bigtable.table import _create_row_request from google.cloud.bigtable_v2.types import RowRange table_name = "table_name" @@ -2079,8 +2081,8 @@ def test__create_row_request_row_range_both_keys_inclusive(): def test__create_row_request_with_filter(): - from google.cloud.bigtable.deprecated.table import _create_row_request - from google.cloud.bigtable.deprecated.row_filters import RowSampleFilter + from google.cloud.bigtable.table import _create_row_request + from google.cloud.bigtable.row_filters import RowSampleFilter table_name = "table_name" row_filter = RowSampleFilter(0.33) @@ -2092,7 +2094,7 @@ def test__create_row_request_with_filter(): def 
test__create_row_request_with_limit(): - from google.cloud.bigtable.deprecated.table import _create_row_request + from google.cloud.bigtable.table import _create_row_request table_name = "table_name" limit = 1337 @@ -2102,8 +2104,8 @@ def test__create_row_request_with_limit(): def test__create_row_request_with_row_set(): - from google.cloud.bigtable.deprecated.table import _create_row_request - from google.cloud.bigtable.deprecated.row_set import RowSet + from google.cloud.bigtable.table import _create_row_request + from google.cloud.bigtable.row_set import RowSet table_name = "table_name" row_set = RowSet() @@ -2113,7 +2115,7 @@ def test__create_row_request_with_row_set(): def test__create_row_request_with_app_profile_id(): - from google.cloud.bigtable.deprecated.table import _create_row_request + from google.cloud.bigtable.table import _create_row_request table_name = "table_name" limit = 1337 @@ -2132,8 +2134,8 @@ def _ReadRowsRequestPB(*args, **kw): def test_cluster_state___eq__(): - from google.cloud.bigtable.deprecated.enums import Table as enum_table - from google.cloud.bigtable.deprecated.table import ClusterState + from google.cloud.bigtable.enums import Table as enum_table + from google.cloud.bigtable.table import ClusterState READY = enum_table.ReplicationState.READY state1 = ClusterState(READY) @@ -2142,8 +2144,8 @@ def test_cluster_state___eq__(): def test_cluster_state___eq__type_differ(): - from google.cloud.bigtable.deprecated.enums import Table as enum_table - from google.cloud.bigtable.deprecated.table import ClusterState + from google.cloud.bigtable.enums import Table as enum_table + from google.cloud.bigtable.table import ClusterState READY = enum_table.ReplicationState.READY state1 = ClusterState(READY) @@ -2152,8 +2154,8 @@ def test_cluster_state___eq__type_differ(): def test_cluster_state___ne__same_value(): - from google.cloud.bigtable.deprecated.enums import Table as enum_table - from google.cloud.bigtable.deprecated.table import ClusterState + from google.cloud.bigtable.enums import Table as enum_table + from google.cloud.bigtable.table import ClusterState READY = enum_table.ReplicationState.READY state1 = ClusterState(READY) @@ -2162,8 +2164,8 @@ def test_cluster_state___ne__same_value(): def test_cluster_state___ne__(): - from google.cloud.bigtable.deprecated.enums import Table as enum_table - from google.cloud.bigtable.deprecated.table import ClusterState + from google.cloud.bigtable.enums import Table as enum_table + from google.cloud.bigtable.table import ClusterState READY = enum_table.ReplicationState.READY INITIALIZING = enum_table.ReplicationState.INITIALIZING @@ -2173,8 +2175,8 @@ def test_cluster_state___ne__(): def test_cluster_state__repr__(): - from google.cloud.bigtable.deprecated.enums import Table as enum_table - from google.cloud.bigtable.deprecated.table import ClusterState + from google.cloud.bigtable.enums import Table as enum_table + from google.cloud.bigtable.table import ClusterState STATE_NOT_KNOWN = enum_table.ReplicationState.STATE_NOT_KNOWN INITIALIZING = enum_table.ReplicationState.INITIALIZING